]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.0-3.17.2-201410312213.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.17.2-201410312213.patch
CommitLineData
6ebd87a7
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 764f599..c600e2f 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 1edd5fd..107ff46 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1155,6 +1155,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2175,6 +2179,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2467,6 +2475,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
372diff --git a/Makefile b/Makefile
373index 390afde..33153b5 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377
378 HOSTCC = gcc
379 HOSTCXX = g++
380-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
381-HOSTCXXFLAGS = -O2
382+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
383+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
384+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
385
386 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
387 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
388@@ -450,8 +451,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
389 # Rules shared between *config targets and build targets
390
391 # Basic helpers built in scripts/
392-PHONY += scripts_basic
393-scripts_basic:
394+PHONY += scripts_basic gcc-plugins
395+scripts_basic: gcc-plugins
396 $(Q)$(MAKE) $(build)=scripts/basic
397 $(Q)rm -f .tmp_quiet_recordmcount
398
399@@ -625,6 +626,72 @@ endif
400 # Tell gcc to never replace conditional load with a non-conditional one
401 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
402
403+ifndef DISABLE_PAX_PLUGINS
404+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
405+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
406+else
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
408+endif
409+ifneq ($(PLUGINCC),)
410+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
411+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
412+endif
413+ifdef CONFIG_PAX_MEMORY_STACKLEAK
414+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
415+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
416+endif
417+ifdef CONFIG_KALLOCSTAT_PLUGIN
418+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
419+endif
420+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
421+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
422+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
424+endif
425+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
426+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
428+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
429+endif
430+endif
431+ifdef CONFIG_CHECKER_PLUGIN
432+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
433+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
434+endif
435+endif
436+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
437+ifdef CONFIG_PAX_SIZE_OVERFLOW
438+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
439+endif
440+ifdef CONFIG_PAX_LATENT_ENTROPY
441+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
442+endif
443+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
444+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
445+endif
446+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
451+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
452+ifeq ($(KBUILD_EXTMOD),)
453+gcc-plugins:
454+ $(Q)$(MAKE) $(build)=tools/gcc
455+else
456+gcc-plugins: ;
457+endif
458+else
459+gcc-plugins:
460+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
461+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
462+else
463+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
464+endif
465+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
466+endif
467+endif
468+
469 ifdef CONFIG_READABLE_ASM
470 # Disable optimizations that make assembler listings hard to read.
471 # reorder blocks reorders the control in the function
472@@ -717,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
473 else
474 KBUILD_CFLAGS += -g
475 endif
476-KBUILD_AFLAGS += -Wa,-gdwarf-2
477+KBUILD_AFLAGS += -Wa,--gdwarf-2
478 endif
479 ifdef CONFIG_DEBUG_INFO_DWARF4
480 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
481@@ -867,7 +934,7 @@ export mod_sign_cmd
482
483
484 ifeq ($(KBUILD_EXTMOD),)
485-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
486+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
487
488 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
489 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
490@@ -916,6 +983,8 @@ endif
491
492 # The actual objects are generated when descending,
493 # make sure no implicit rule kicks in
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
495+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
496 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
497
498 # Handle descending into subdirectories listed in $(vmlinux-dirs)
499@@ -925,7 +994,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
500 # Error messages still appears in the original language
501
502 PHONY += $(vmlinux-dirs)
503-$(vmlinux-dirs): prepare scripts
504+$(vmlinux-dirs): gcc-plugins prepare scripts
505 $(Q)$(MAKE) $(build)=$@
506
507 define filechk_kernel.release
508@@ -968,10 +1037,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
509
510 archprepare: archheaders archscripts prepare1 scripts_basic
511
512+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514 prepare0: archprepare FORCE
515 $(Q)$(MAKE) $(build)=.
516
517 # All the preparing..
518+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
519 prepare: prepare0
520
521 # Generate some files
522@@ -1086,6 +1158,8 @@ all: modules
523 # using awk while concatenating to the final file.
524
525 PHONY += modules
526+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
527+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
528 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
529 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
530 @$(kecho) ' Building modules, stage 2.';
531@@ -1101,7 +1175,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
532
533 # Target to prepare building external modules
534 PHONY += modules_prepare
535-modules_prepare: prepare scripts
536+modules_prepare: gcc-plugins prepare scripts
537
538 # Target to install modules
539 PHONY += modules_install
540@@ -1167,7 +1241,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
541 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
542 signing_key.priv signing_key.x509 x509.genkey \
543 extra_certificates signing_key.x509.keyid \
544- signing_key.x509.signer include/linux/version.h
545+ signing_key.x509.signer include/linux/version.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
547+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
548+ tools/gcc/randomize_layout_seed.h
549
550 # clean - Delete most, but leave enough to build external modules
551 #
552@@ -1206,7 +1283,7 @@ distclean: mrproper
553 @find $(srctree) $(RCS_FIND_IGNORE) \
554 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
555 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
556- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
557+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
558 -type f -print | xargs rm -f
559
560
561@@ -1372,6 +1449,8 @@ PHONY += $(module-dirs) modules
562 $(module-dirs): crmodverdir $(objtree)/Module.symvers
563 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
564
565+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
566+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
567 modules: $(module-dirs)
568 @$(kecho) ' Building modules, stage 2.';
569 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
570@@ -1512,17 +1591,21 @@ else
571 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
572 endif
573
574-%.s: %.c prepare scripts FORCE
575+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
576+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
577+%.s: %.c gcc-plugins prepare scripts FORCE
578 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
579 %.i: %.c prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581-%.o: %.c prepare scripts FORCE
582+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
583+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
584+%.o: %.c gcc-plugins prepare scripts FORCE
585 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
586 %.lst: %.c prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588-%.s: %.S prepare scripts FORCE
589+%.s: %.S gcc-plugins prepare scripts FORCE
590 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
591-%.o: %.S prepare scripts FORCE
592+%.o: %.S gcc-plugins prepare scripts FORCE
593 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
594 %.symtypes: %.c prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596@@ -1532,11 +1615,15 @@ endif
597 $(cmd_crmodverdir)
598 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
599 $(build)=$(build-dir)
600-%/: prepare scripts FORCE
601+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
602+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
603+%/: gcc-plugins prepare scripts FORCE
604 $(cmd_crmodverdir)
605 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
606 $(build)=$(build-dir)
607-%.ko: prepare scripts FORCE
608+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
609+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
610+%.ko: gcc-plugins prepare scripts FORCE
611 $(cmd_crmodverdir)
612 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
613 $(build)=$(build-dir) $(@:.ko=.o)
614diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
615index ed60a1e..47f1a55 100644
616--- a/arch/alpha/include/asm/atomic.h
617+++ b/arch/alpha/include/asm/atomic.h
618@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
619 #define atomic_dec(v) atomic_sub(1,(v))
620 #define atomic64_dec(v) atomic64_sub(1,(v))
621
622+#define atomic64_read_unchecked(v) atomic64_read(v)
623+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
624+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
625+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
626+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
627+#define atomic64_inc_unchecked(v) atomic64_inc(v)
628+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
629+#define atomic64_dec_unchecked(v) atomic64_dec(v)
630+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
631+
632 #endif /* _ALPHA_ATOMIC_H */
633diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
634index ad368a9..fbe0f25 100644
635--- a/arch/alpha/include/asm/cache.h
636+++ b/arch/alpha/include/asm/cache.h
637@@ -4,19 +4,19 @@
638 #ifndef __ARCH_ALPHA_CACHE_H
639 #define __ARCH_ALPHA_CACHE_H
640
641+#include <linux/const.h>
642
643 /* Bytes per L1 (data) cache line. */
644 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
645-# define L1_CACHE_BYTES 64
646 # define L1_CACHE_SHIFT 6
647 #else
648 /* Both EV4 and EV5 are write-through, read-allocate,
649 direct-mapped, physical.
650 */
651-# define L1_CACHE_BYTES 32
652 # define L1_CACHE_SHIFT 5
653 #endif
654
655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
656 #define SMP_CACHE_BYTES L1_CACHE_BYTES
657
658 #endif
659diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
660index 968d999..d36b2df 100644
661--- a/arch/alpha/include/asm/elf.h
662+++ b/arch/alpha/include/asm/elf.h
663@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
664
665 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
666
667+#ifdef CONFIG_PAX_ASLR
668+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
669+
670+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
671+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
672+#endif
673+
674 /* $0 is set by ld.so to a pointer to a function which might be
675 registered using atexit. This provides a mean for the dynamic
676 linker to call DT_FINI functions for shared libraries that have
677diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
678index aab14a0..b4fa3e7 100644
679--- a/arch/alpha/include/asm/pgalloc.h
680+++ b/arch/alpha/include/asm/pgalloc.h
681@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
682 pgd_set(pgd, pmd);
683 }
684
685+static inline void
686+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
687+{
688+ pgd_populate(mm, pgd, pmd);
689+}
690+
691 extern pgd_t *pgd_alloc(struct mm_struct *mm);
692
693 static inline void
694diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
695index d8f9b7e..f6222fa 100644
696--- a/arch/alpha/include/asm/pgtable.h
697+++ b/arch/alpha/include/asm/pgtable.h
698@@ -102,6 +102,17 @@ struct vm_area_struct;
699 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
700 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
702+
703+#ifdef CONFIG_PAX_PAGEEXEC
704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
707+#else
708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
709+# define PAGE_COPY_NOEXEC PAGE_COPY
710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
711+#endif
712+
713 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
714
715 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
716diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
717index 2fd00b7..cfd5069 100644
718--- a/arch/alpha/kernel/module.c
719+++ b/arch/alpha/kernel/module.c
720@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
721
722 /* The small sections were sorted to the end of the segment.
723 The following should definitely cover them. */
724- gp = (u64)me->module_core + me->core_size - 0x8000;
725+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
726 got = sechdrs[me->arch.gotsecindex].sh_addr;
727
728 for (i = 0; i < n; i++) {
729diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
730index 1402fcc..0b1abd2 100644
731--- a/arch/alpha/kernel/osf_sys.c
732+++ b/arch/alpha/kernel/osf_sys.c
733@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
734 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
735
736 static unsigned long
737-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
738- unsigned long limit)
739+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
740+ unsigned long limit, unsigned long flags)
741 {
742 struct vm_unmapped_area_info info;
743+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
744
745 info.flags = 0;
746 info.length = len;
747@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
748 info.high_limit = limit;
749 info.align_mask = 0;
750 info.align_offset = 0;
751+ info.threadstack_offset = offset;
752 return vm_unmapped_area(&info);
753 }
754
755@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
756 merely specific addresses, but regions of memory -- perhaps
757 this feature should be incorporated into all ports? */
758
759+#ifdef CONFIG_PAX_RANDMMAP
760+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
761+#endif
762+
763 if (addr) {
764- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
765+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
766 if (addr != (unsigned long) -ENOMEM)
767 return addr;
768 }
769
770 /* Next, try allocating at TASK_UNMAPPED_BASE. */
771- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
772- len, limit);
773+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
774+
775 if (addr != (unsigned long) -ENOMEM)
776 return addr;
777
778 /* Finally, try allocating in low memory. */
779- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
780+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
781
782 return addr;
783 }
784diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
785index 98838a0..b304fb4 100644
786--- a/arch/alpha/mm/fault.c
787+++ b/arch/alpha/mm/fault.c
788@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
789 __reload_thread(pcb);
790 }
791
792+#ifdef CONFIG_PAX_PAGEEXEC
793+/*
794+ * PaX: decide what to do with offenders (regs->pc = fault address)
795+ *
796+ * returns 1 when task should be killed
797+ * 2 when patched PLT trampoline was detected
798+ * 3 when unpatched PLT trampoline was detected
799+ */
800+static int pax_handle_fetch_fault(struct pt_regs *regs)
801+{
802+
803+#ifdef CONFIG_PAX_EMUPLT
804+ int err;
805+
806+ do { /* PaX: patched PLT emulation #1 */
807+ unsigned int ldah, ldq, jmp;
808+
809+ err = get_user(ldah, (unsigned int *)regs->pc);
810+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
811+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
812+
813+ if (err)
814+ break;
815+
816+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
817+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
818+ jmp == 0x6BFB0000U)
819+ {
820+ unsigned long r27, addr;
821+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
822+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
823+
824+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
825+ err = get_user(r27, (unsigned long *)addr);
826+ if (err)
827+ break;
828+
829+ regs->r27 = r27;
830+ regs->pc = r27;
831+ return 2;
832+ }
833+ } while (0);
834+
835+ do { /* PaX: patched PLT emulation #2 */
836+ unsigned int ldah, lda, br;
837+
838+ err = get_user(ldah, (unsigned int *)regs->pc);
839+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
840+ err |= get_user(br, (unsigned int *)(regs->pc+8));
841+
842+ if (err)
843+ break;
844+
845+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
846+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
847+ (br & 0xFFE00000U) == 0xC3E00000U)
848+ {
849+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
850+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
851+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
852+
853+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
854+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
855+ return 2;
856+ }
857+ } while (0);
858+
859+ do { /* PaX: unpatched PLT emulation */
860+ unsigned int br;
861+
862+ err = get_user(br, (unsigned int *)regs->pc);
863+
864+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
865+ unsigned int br2, ldq, nop, jmp;
866+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
867+
868+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
869+ err = get_user(br2, (unsigned int *)addr);
870+ err |= get_user(ldq, (unsigned int *)(addr+4));
871+ err |= get_user(nop, (unsigned int *)(addr+8));
872+ err |= get_user(jmp, (unsigned int *)(addr+12));
873+ err |= get_user(resolver, (unsigned long *)(addr+16));
874+
875+ if (err)
876+ break;
877+
878+ if (br2 == 0xC3600000U &&
879+ ldq == 0xA77B000CU &&
880+ nop == 0x47FF041FU &&
881+ jmp == 0x6B7B0000U)
882+ {
883+ regs->r28 = regs->pc+4;
884+ regs->r27 = addr+16;
885+ regs->pc = resolver;
886+ return 3;
887+ }
888+ }
889+ } while (0);
890+#endif
891+
892+ return 1;
893+}
894+
895+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
896+{
897+ unsigned long i;
898+
899+ printk(KERN_ERR "PAX: bytes at PC: ");
900+ for (i = 0; i < 5; i++) {
901+ unsigned int c;
902+ if (get_user(c, (unsigned int *)pc+i))
903+ printk(KERN_CONT "???????? ");
904+ else
905+ printk(KERN_CONT "%08x ", c);
906+ }
907+ printk("\n");
908+}
909+#endif
910
911 /*
912 * This routine handles page faults. It determines the address,
913@@ -133,8 +251,29 @@ retry:
914 good_area:
915 si_code = SEGV_ACCERR;
916 if (cause < 0) {
917- if (!(vma->vm_flags & VM_EXEC))
918+ if (!(vma->vm_flags & VM_EXEC)) {
919+
920+#ifdef CONFIG_PAX_PAGEEXEC
921+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
922+ goto bad_area;
923+
924+ up_read(&mm->mmap_sem);
925+ switch (pax_handle_fetch_fault(regs)) {
926+
927+#ifdef CONFIG_PAX_EMUPLT
928+ case 2:
929+ case 3:
930+ return;
931+#endif
932+
933+ }
934+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
935+ do_group_exit(SIGKILL);
936+#else
937 goto bad_area;
938+#endif
939+
940+ }
941 } else if (!cause) {
942 /* Allow reads even for write-only mappings */
943 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
944diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
945index a2ff5c5..ecf6a78 100644
946--- a/arch/arc/kernel/kgdb.c
947+++ b/arch/arc/kernel/kgdb.c
948@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
949 return -1;
950 }
951
952-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
953-{
954- return instruction_pointer(regs);
955-}
956-
957 int kgdb_arch_init(void)
958 {
959 single_step_data.armed = 0;
960diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
961index 32cbbd5..c102df9 100644
962--- a/arch/arm/Kconfig
963+++ b/arch/arm/Kconfig
964@@ -1719,7 +1719,7 @@ config ALIGNMENT_TRAP
965
966 config UACCESS_WITH_MEMCPY
967 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
968- depends on MMU
969+ depends on MMU && !PAX_MEMORY_UDEREF
970 default y if CPU_FEROCEON
971 help
972 Implement faster copy_to_user and clear_user methods for CPU
973@@ -1983,6 +1983,7 @@ config XIP_PHYS_ADDR
974 config KEXEC
975 bool "Kexec system call (EXPERIMENTAL)"
976 depends on (!SMP || PM_SLEEP_SMP)
977+ depends on !GRKERNSEC_KMEM
978 help
979 kexec is a system call that implements the ability to shutdown your
980 current kernel, and to start another kernel. It is like a reboot
981diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
982index 3040359..a494fa3 100644
983--- a/arch/arm/include/asm/atomic.h
984+++ b/arch/arm/include/asm/atomic.h
985@@ -18,17 +18,41 @@
986 #include <asm/barrier.h>
987 #include <asm/cmpxchg.h>
988
989+#ifdef CONFIG_GENERIC_ATOMIC64
990+#include <asm-generic/atomic64.h>
991+#endif
992+
993 #define ATOMIC_INIT(i) { (i) }
994
995 #ifdef __KERNEL__
996
997+#ifdef CONFIG_THUMB2_KERNEL
998+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
999+#else
1000+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1001+#endif
1002+
1003+#define _ASM_EXTABLE(from, to) \
1004+" .pushsection __ex_table,\"a\"\n"\
1005+" .align 3\n" \
1006+" .long " #from ", " #to"\n" \
1007+" .popsection"
1008+
1009 /*
1010 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1011 * strex/ldrex monitor on some implementations. The reason we can use it for
1012 * atomic_set() is the clrex or dummy strex done on every exception return.
1013 */
1014 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1015+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1016+{
1017+ return v->counter;
1018+}
1019 #define atomic_set(v,i) (((v)->counter) = (i))
1020+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1021+{
1022+ v->counter = i;
1023+}
1024
1025 #if __LINUX_ARM_ARCH__ >= 6
1026
1027@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
1028
1029 prefetchw(&v->counter);
1030 __asm__ __volatile__("@ atomic_add\n"
1031+"1: ldrex %1, [%3]\n"
1032+" adds %0, %1, %4\n"
1033+
1034+#ifdef CONFIG_PAX_REFCOUNT
1035+" bvc 3f\n"
1036+"2: " REFCOUNT_TRAP_INSN "\n"
1037+"3:\n"
1038+#endif
1039+
1040+" strex %1, %0, [%3]\n"
1041+" teq %1, #0\n"
1042+" bne 1b"
1043+
1044+#ifdef CONFIG_PAX_REFCOUNT
1045+"\n4:\n"
1046+ _ASM_EXTABLE(2b, 4b)
1047+#endif
1048+
1049+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1050+ : "r" (&v->counter), "Ir" (i)
1051+ : "cc");
1052+}
1053+
1054+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1055+{
1056+ unsigned long tmp;
1057+ int result;
1058+
1059+ prefetchw(&v->counter);
1060+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1061 "1: ldrex %0, [%3]\n"
1062 " add %0, %0, %4\n"
1063 " strex %1, %0, [%3]\n"
1064@@ -63,6 +117,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1065 prefetchw(&v->counter);
1066
1067 __asm__ __volatile__("@ atomic_add_return\n"
1068+"1: ldrex %1, [%3]\n"
1069+" adds %0, %1, %4\n"
1070+
1071+#ifdef CONFIG_PAX_REFCOUNT
1072+" bvc 3f\n"
1073+" mov %0, %1\n"
1074+"2: " REFCOUNT_TRAP_INSN "\n"
1075+"3:\n"
1076+#endif
1077+
1078+" strex %1, %0, [%3]\n"
1079+" teq %1, #0\n"
1080+" bne 1b"
1081+
1082+#ifdef CONFIG_PAX_REFCOUNT
1083+"\n4:\n"
1084+ _ASM_EXTABLE(2b, 4b)
1085+#endif
1086+
1087+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1088+ : "r" (&v->counter), "Ir" (i)
1089+ : "cc");
1090+
1091+ smp_mb();
1092+
1093+ return result;
1094+}
1095+
1096+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1097+{
1098+ unsigned long tmp;
1099+ int result;
1100+
1101+ smp_mb();
1102+ prefetchw(&v->counter);
1103+
1104+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1105 "1: ldrex %0, [%3]\n"
1106 " add %0, %0, %4\n"
1107 " strex %1, %0, [%3]\n"
1108@@ -84,6 +175,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1109
1110 prefetchw(&v->counter);
1111 __asm__ __volatile__("@ atomic_sub\n"
1112+"1: ldrex %1, [%3]\n"
1113+" subs %0, %1, %4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: " REFCOUNT_TRAP_INSN "\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strex %1, %0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "Ir" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1136+{
1137+ unsigned long tmp;
1138+ int result;
1139+
1140+ prefetchw(&v->counter);
1141+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1142 "1: ldrex %0, [%3]\n"
1143 " sub %0, %0, %4\n"
1144 " strex %1, %0, [%3]\n"
1145@@ -103,11 +224,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1146 prefetchw(&v->counter);
1147
1148 __asm__ __volatile__("@ atomic_sub_return\n"
1149-"1: ldrex %0, [%3]\n"
1150-" sub %0, %0, %4\n"
1151+"1: ldrex %1, [%3]\n"
1152+" subs %0, %1, %4\n"
1153+
1154+#ifdef CONFIG_PAX_REFCOUNT
1155+" bvc 3f\n"
1156+" mov %0, %1\n"
1157+"2: " REFCOUNT_TRAP_INSN "\n"
1158+"3:\n"
1159+#endif
1160+
1161 " strex %1, %0, [%3]\n"
1162 " teq %1, #0\n"
1163 " bne 1b"
1164+
1165+#ifdef CONFIG_PAX_REFCOUNT
1166+"\n4:\n"
1167+ _ASM_EXTABLE(2b, 4b)
1168+#endif
1169+
1170 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1171 : "r" (&v->counter), "Ir" (i)
1172 : "cc");
1173@@ -152,12 +287,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1174 __asm__ __volatile__ ("@ atomic_add_unless\n"
1175 "1: ldrex %0, [%4]\n"
1176 " teq %0, %5\n"
1177-" beq 2f\n"
1178-" add %1, %0, %6\n"
1179+" beq 4f\n"
1180+" adds %1, %0, %6\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: " REFCOUNT_TRAP_INSN "\n"
1185+"3:\n"
1186+#endif
1187+
1188 " strex %2, %1, [%4]\n"
1189 " teq %2, #0\n"
1190 " bne 1b\n"
1191-"2:"
1192+"4:"
1193+
1194+#ifdef CONFIG_PAX_REFCOUNT
1195+ _ASM_EXTABLE(2b, 4b)
1196+#endif
1197+
1198 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1199 : "r" (&v->counter), "r" (u), "r" (a)
1200 : "cc");
1201@@ -168,6 +315,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1202 return oldval;
1203 }
1204
1205+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1206+{
1207+ unsigned long oldval, res;
1208+
1209+ smp_mb();
1210+
1211+ do {
1212+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1213+ "ldrex %1, [%3]\n"
1214+ "mov %0, #0\n"
1215+ "teq %1, %4\n"
1216+ "strexeq %0, %5, [%3]\n"
1217+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1218+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1219+ : "cc");
1220+ } while (res);
1221+
1222+ smp_mb();
1223+
1224+ return oldval;
1225+}
1226+
1227 #else /* ARM_ARCH_6 */
1228
1229 #ifdef CONFIG_SMP
1230@@ -186,7 +355,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1231
1232 return val;
1233 }
1234+
1235+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1236+{
1237+ return atomic_add_return(i, v);
1238+}
1239+
1240 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1241+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1242+{
1243+ (void) atomic_add_return(i, v);
1244+}
1245
1246 static inline int atomic_sub_return(int i, atomic_t *v)
1247 {
1248@@ -201,6 +380,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1249 return val;
1250 }
1251 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1252+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1253+{
1254+ (void) atomic_sub_return(i, v);
1255+}
1256
1257 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1258 {
1259@@ -216,6 +399,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1260 return ret;
1261 }
1262
1263+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1264+{
1265+ return atomic_cmpxchg(v, old, new);
1266+}
1267+
1268 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1269 {
1270 int c, old;
1271@@ -229,13 +417,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1272 #endif /* __LINUX_ARM_ARCH__ */
1273
1274 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1275+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1276+{
1277+ return xchg(&v->counter, new);
1278+}
1279
1280 #define atomic_inc(v) atomic_add(1, v)
1281+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1282+{
1283+ atomic_add_unchecked(1, v);
1284+}
1285 #define atomic_dec(v) atomic_sub(1, v)
1286+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1287+{
1288+ atomic_sub_unchecked(1, v);
1289+}
1290
1291 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1292+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1293+{
1294+ return atomic_add_return_unchecked(1, v) == 0;
1295+}
1296 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1297 #define atomic_inc_return(v) (atomic_add_return(1, v))
1298+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1299+{
1300+ return atomic_add_return_unchecked(1, v);
1301+}
1302 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1303 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1304
1305@@ -246,6 +454,14 @@ typedef struct {
1306 long long counter;
1307 } atomic64_t;
1308
1309+#ifdef CONFIG_PAX_REFCOUNT
1310+typedef struct {
1311+ long long counter;
1312+} atomic64_unchecked_t;
1313+#else
1314+typedef atomic64_t atomic64_unchecked_t;
1315+#endif
1316+
1317 #define ATOMIC64_INIT(i) { (i) }
1318
1319 #ifdef CONFIG_ARM_LPAE
1320@@ -262,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1321 return result;
1322 }
1323
1324+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1325+{
1326+ long long result;
1327+
1328+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1329+" ldrd %0, %H0, [%1]"
1330+ : "=&r" (result)
1331+ : "r" (&v->counter), "Qo" (v->counter)
1332+ );
1333+
1334+ return result;
1335+}
1336+
1337 static inline void atomic64_set(atomic64_t *v, long long i)
1338 {
1339 __asm__ __volatile__("@ atomic64_set\n"
1340@@ -270,6 +499,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1341 : "r" (&v->counter), "r" (i)
1342 );
1343 }
1344+
1345+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1346+{
1347+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1348+" strd %2, %H2, [%1]"
1349+ : "=Qo" (v->counter)
1350+ : "r" (&v->counter), "r" (i)
1351+ );
1352+}
1353 #else
1354 static inline long long atomic64_read(const atomic64_t *v)
1355 {
1356@@ -284,6 +522,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1357 return result;
1358 }
1359
1360+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1361+{
1362+ long long result;
1363+
1364+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1365+" ldrexd %0, %H0, [%1]"
1366+ : "=&r" (result)
1367+ : "r" (&v->counter), "Qo" (v->counter)
1368+ );
1369+
1370+ return result;
1371+}
1372+
1373 static inline void atomic64_set(atomic64_t *v, long long i)
1374 {
1375 long long tmp;
1376@@ -298,6 +549,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1377 : "r" (&v->counter), "r" (i)
1378 : "cc");
1379 }
1380+
1381+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1382+{
1383+ long long tmp;
1384+
1385+ prefetchw(&v->counter);
1386+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1387+"1: ldrexd %0, %H0, [%2]\n"
1388+" strexd %0, %3, %H3, [%2]\n"
1389+" teq %0, #0\n"
1390+" bne 1b"
1391+ : "=&r" (tmp), "=Qo" (v->counter)
1392+ : "r" (&v->counter), "r" (i)
1393+ : "cc");
1394+}
1395 #endif
1396
1397 static inline void atomic64_add(long long i, atomic64_t *v)
1398@@ -309,6 +575,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1399 __asm__ __volatile__("@ atomic64_add\n"
1400 "1: ldrexd %0, %H0, [%3]\n"
1401 " adds %Q0, %Q0, %Q4\n"
1402+" adcs %R0, %R0, %R4\n"
1403+
1404+#ifdef CONFIG_PAX_REFCOUNT
1405+" bvc 3f\n"
1406+"2: " REFCOUNT_TRAP_INSN "\n"
1407+"3:\n"
1408+#endif
1409+
1410+" strexd %1, %0, %H0, [%3]\n"
1411+" teq %1, #0\n"
1412+" bne 1b"
1413+
1414+#ifdef CONFIG_PAX_REFCOUNT
1415+"\n4:\n"
1416+ _ASM_EXTABLE(2b, 4b)
1417+#endif
1418+
1419+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1420+ : "r" (&v->counter), "r" (i)
1421+ : "cc");
1422+}
1423+
1424+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1425+{
1426+ long long result;
1427+ unsigned long tmp;
1428+
1429+ prefetchw(&v->counter);
1430+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1431+"1: ldrexd %0, %H0, [%3]\n"
1432+" adds %Q0, %Q0, %Q4\n"
1433 " adc %R0, %R0, %R4\n"
1434 " strexd %1, %0, %H0, [%3]\n"
1435 " teq %1, #0\n"
1436@@ -329,6 +626,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1437 __asm__ __volatile__("@ atomic64_add_return\n"
1438 "1: ldrexd %0, %H0, [%3]\n"
1439 " adds %Q0, %Q0, %Q4\n"
1440+" adcs %R0, %R0, %R4\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: " REFCOUNT_TRAP_INSN "\n"
1447+"3:\n"
1448+#endif
1449+
1450+" strexd %1, %0, %H0, [%3]\n"
1451+" teq %1, #0\n"
1452+" bne 1b"
1453+
1454+#ifdef CONFIG_PAX_REFCOUNT
1455+"\n4:\n"
1456+ _ASM_EXTABLE(2b, 4b)
1457+#endif
1458+
1459+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1460+ : "r" (&v->counter), "r" (i)
1461+ : "cc");
1462+
1463+ smp_mb();
1464+
1465+ return result;
1466+}
1467+
1468+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1469+{
1470+ long long result;
1471+ unsigned long tmp;
1472+
1473+ smp_mb();
1474+
1475+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1476+"1: ldrexd %0, %H0, [%3]\n"
1477+" adds %Q0, %Q0, %Q4\n"
1478 " adc %R0, %R0, %R4\n"
1479 " strexd %1, %0, %H0, [%3]\n"
1480 " teq %1, #0\n"
1481@@ -351,6 +686,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1482 __asm__ __volatile__("@ atomic64_sub\n"
1483 "1: ldrexd %0, %H0, [%3]\n"
1484 " subs %Q0, %Q0, %Q4\n"
1485+" sbcs %R0, %R0, %R4\n"
1486+
1487+#ifdef CONFIG_PAX_REFCOUNT
1488+" bvc 3f\n"
1489+"2: " REFCOUNT_TRAP_INSN "\n"
1490+"3:\n"
1491+#endif
1492+
1493+" strexd %1, %0, %H0, [%3]\n"
1494+" teq %1, #0\n"
1495+" bne 1b"
1496+
1497+#ifdef CONFIG_PAX_REFCOUNT
1498+"\n4:\n"
1499+ _ASM_EXTABLE(2b, 4b)
1500+#endif
1501+
1502+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1503+ : "r" (&v->counter), "r" (i)
1504+ : "cc");
1505+}
1506+
1507+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1508+{
1509+ long long result;
1510+ unsigned long tmp;
1511+
1512+ prefetchw(&v->counter);
1513+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1514+"1: ldrexd %0, %H0, [%3]\n"
1515+" subs %Q0, %Q0, %Q4\n"
1516 " sbc %R0, %R0, %R4\n"
1517 " strexd %1, %0, %H0, [%3]\n"
1518 " teq %1, #0\n"
1519@@ -371,10 +737,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1520 __asm__ __volatile__("@ atomic64_sub_return\n"
1521 "1: ldrexd %0, %H0, [%3]\n"
1522 " subs %Q0, %Q0, %Q4\n"
1523-" sbc %R0, %R0, %R4\n"
1524+" sbcs %R0, %R0, %R4\n"
1525+
1526+#ifdef CONFIG_PAX_REFCOUNT
1527+" bvc 3f\n"
1528+" mov %0, %1\n"
1529+" mov %H0, %H1\n"
1530+"2: " REFCOUNT_TRAP_INSN "\n"
1531+"3:\n"
1532+#endif
1533+
1534 " strexd %1, %0, %H0, [%3]\n"
1535 " teq %1, #0\n"
1536 " bne 1b"
1537+
1538+#ifdef CONFIG_PAX_REFCOUNT
1539+"\n4:\n"
1540+ _ASM_EXTABLE(2b, 4b)
1541+#endif
1542+
1543 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1544 : "r" (&v->counter), "r" (i)
1545 : "cc");
1546@@ -410,6 +791,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1547 return oldval;
1548 }
1549
1550+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1551+ long long new)
1552+{
1553+ long long oldval;
1554+ unsigned long res;
1555+
1556+ smp_mb();
1557+
1558+ do {
1559+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1560+ "ldrexd %1, %H1, [%3]\n"
1561+ "mov %0, #0\n"
1562+ "teq %1, %4\n"
1563+ "teqeq %H1, %H4\n"
1564+ "strexdeq %0, %5, %H5, [%3]"
1565+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1566+ : "r" (&ptr->counter), "r" (old), "r" (new)
1567+ : "cc");
1568+ } while (res);
1569+
1570+ smp_mb();
1571+
1572+ return oldval;
1573+}
1574+
1575 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1576 {
1577 long long result;
1578@@ -435,21 +841,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1579 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1580 {
1581 long long result;
1582- unsigned long tmp;
1583+ u64 tmp;
1584
1585 smp_mb();
1586 prefetchw(&v->counter);
1587
1588 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1589-"1: ldrexd %0, %H0, [%3]\n"
1590-" subs %Q0, %Q0, #1\n"
1591-" sbc %R0, %R0, #0\n"
1592+"1: ldrexd %1, %H1, [%3]\n"
1593+" subs %Q0, %Q1, #1\n"
1594+" sbcs %R0, %R1, #0\n"
1595+
1596+#ifdef CONFIG_PAX_REFCOUNT
1597+" bvc 3f\n"
1598+" mov %Q0, %Q1\n"
1599+" mov %R0, %R1\n"
1600+"2: " REFCOUNT_TRAP_INSN "\n"
1601+"3:\n"
1602+#endif
1603+
1604 " teq %R0, #0\n"
1605-" bmi 2f\n"
1606+" bmi 4f\n"
1607 " strexd %1, %0, %H0, [%3]\n"
1608 " teq %1, #0\n"
1609 " bne 1b\n"
1610-"2:"
1611+"4:\n"
1612+
1613+#ifdef CONFIG_PAX_REFCOUNT
1614+ _ASM_EXTABLE(2b, 4b)
1615+#endif
1616+
1617 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1618 : "r" (&v->counter)
1619 : "cc");
1620@@ -473,13 +893,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1621 " teq %0, %5\n"
1622 " teqeq %H0, %H5\n"
1623 " moveq %1, #0\n"
1624-" beq 2f\n"
1625+" beq 4f\n"
1626 " adds %Q0, %Q0, %Q6\n"
1627-" adc %R0, %R0, %R6\n"
1628+" adcs %R0, %R0, %R6\n"
1629+
1630+#ifdef CONFIG_PAX_REFCOUNT
1631+" bvc 3f\n"
1632+"2: " REFCOUNT_TRAP_INSN "\n"
1633+"3:\n"
1634+#endif
1635+
1636 " strexd %2, %0, %H0, [%4]\n"
1637 " teq %2, #0\n"
1638 " bne 1b\n"
1639-"2:"
1640+"4:\n"
1641+
1642+#ifdef CONFIG_PAX_REFCOUNT
1643+ _ASM_EXTABLE(2b, 4b)
1644+#endif
1645+
1646 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1647 : "r" (&v->counter), "r" (u), "r" (a)
1648 : "cc");
1649@@ -492,10 +924,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1650
1651 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1652 #define atomic64_inc(v) atomic64_add(1LL, (v))
1653+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1654 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1655+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1656 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1657 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1658 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1659+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1660 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1661 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1662 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1663diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1664index c6a3e73..35cca85 100644
1665--- a/arch/arm/include/asm/barrier.h
1666+++ b/arch/arm/include/asm/barrier.h
1667@@ -63,7 +63,7 @@
1668 do { \
1669 compiletime_assert_atomic_type(*p); \
1670 smp_mb(); \
1671- ACCESS_ONCE(*p) = (v); \
1672+ ACCESS_ONCE_RW(*p) = (v); \
1673 } while (0)
1674
1675 #define smp_load_acquire(p) \
1676diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1677index 75fe66b..ba3dee4 100644
1678--- a/arch/arm/include/asm/cache.h
1679+++ b/arch/arm/include/asm/cache.h
1680@@ -4,8 +4,10 @@
1681 #ifndef __ASMARM_CACHE_H
1682 #define __ASMARM_CACHE_H
1683
1684+#include <linux/const.h>
1685+
1686 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1689
1690 /*
1691 * Memory returned by kmalloc() may be used for DMA, so we must make
1692@@ -24,5 +26,6 @@
1693 #endif
1694
1695 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1696+#define __read_only __attribute__ ((__section__(".data..read_only")))
1697
1698 #endif
1699diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1700index 10e78d0..dc8505d 100644
1701--- a/arch/arm/include/asm/cacheflush.h
1702+++ b/arch/arm/include/asm/cacheflush.h
1703@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1704 void (*dma_unmap_area)(const void *, size_t, int);
1705
1706 void (*dma_flush_range)(const void *, const void *);
1707-};
1708+} __no_const;
1709
1710 /*
1711 * Select the calling method
1712diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1713index 5233151..87a71fa 100644
1714--- a/arch/arm/include/asm/checksum.h
1715+++ b/arch/arm/include/asm/checksum.h
1716@@ -37,7 +37,19 @@ __wsum
1717 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1718
1719 __wsum
1720-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1721+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1722+
1723+static inline __wsum
1724+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1725+{
1726+ __wsum ret;
1727+ pax_open_userland();
1728+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1729+ pax_close_userland();
1730+ return ret;
1731+}
1732+
1733+
1734
1735 /*
1736 * Fold a partial checksum without adding pseudo headers
1737diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1738index abb2c37..96db950 100644
1739--- a/arch/arm/include/asm/cmpxchg.h
1740+++ b/arch/arm/include/asm/cmpxchg.h
1741@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1742
1743 #define xchg(ptr,x) \
1744 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1745+#define xchg_unchecked(ptr,x) \
1746+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1747
1748 #include <asm-generic/cmpxchg-local.h>
1749
1750diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1751index 6ddbe44..b5e38b1 100644
1752--- a/arch/arm/include/asm/domain.h
1753+++ b/arch/arm/include/asm/domain.h
1754@@ -48,18 +48,37 @@
1755 * Domain types
1756 */
1757 #define DOMAIN_NOACCESS 0
1758-#define DOMAIN_CLIENT 1
1759 #ifdef CONFIG_CPU_USE_DOMAINS
1760+#define DOMAIN_USERCLIENT 1
1761+#define DOMAIN_KERNELCLIENT 1
1762 #define DOMAIN_MANAGER 3
1763+#define DOMAIN_VECTORS DOMAIN_USER
1764 #else
1765+
1766+#ifdef CONFIG_PAX_KERNEXEC
1767 #define DOMAIN_MANAGER 1
1768+#define DOMAIN_KERNEXEC 3
1769+#else
1770+#define DOMAIN_MANAGER 1
1771+#endif
1772+
1773+#ifdef CONFIG_PAX_MEMORY_UDEREF
1774+#define DOMAIN_USERCLIENT 0
1775+#define DOMAIN_UDEREF 1
1776+#define DOMAIN_VECTORS DOMAIN_KERNEL
1777+#else
1778+#define DOMAIN_USERCLIENT 1
1779+#define DOMAIN_VECTORS DOMAIN_USER
1780+#endif
1781+#define DOMAIN_KERNELCLIENT 1
1782+
1783 #endif
1784
1785 #define domain_val(dom,type) ((type) << (2*(dom)))
1786
1787 #ifndef __ASSEMBLY__
1788
1789-#ifdef CONFIG_CPU_USE_DOMAINS
1790+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1791 static inline void set_domain(unsigned val)
1792 {
1793 asm volatile(
1794@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1795 isb();
1796 }
1797
1798-#define modify_domain(dom,type) \
1799- do { \
1800- struct thread_info *thread = current_thread_info(); \
1801- unsigned int domain = thread->cpu_domain; \
1802- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1803- thread->cpu_domain = domain | domain_val(dom, type); \
1804- set_domain(thread->cpu_domain); \
1805- } while (0)
1806-
1807+extern void modify_domain(unsigned int dom, unsigned int type);
1808 #else
1809 static inline void set_domain(unsigned val) { }
1810 static inline void modify_domain(unsigned dom, unsigned type) { }
1811diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1812index afb9caf..9a0bac0 100644
1813--- a/arch/arm/include/asm/elf.h
1814+++ b/arch/arm/include/asm/elf.h
1815@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1816 the loader. We need to make sure that it is out of the way of the program
1817 that it will "exec", and that there is sufficient room for the brk. */
1818
1819-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1820+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1821+
1822+#ifdef CONFIG_PAX_ASLR
1823+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1824+
1825+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1826+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1827+#endif
1828
1829 /* When the program starts, a1 contains a pointer to a function to be
1830 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1831@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1832 extern void elf_set_personality(const struct elf32_hdr *);
1833 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1834
1835-struct mm_struct;
1836-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1837-#define arch_randomize_brk arch_randomize_brk
1838-
1839 #ifdef CONFIG_MMU
1840 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1841 struct linux_binprm;
1842diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1843index de53547..52b9a28 100644
1844--- a/arch/arm/include/asm/fncpy.h
1845+++ b/arch/arm/include/asm/fncpy.h
1846@@ -81,7 +81,9 @@
1847 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1848 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1849 \
1850+ pax_open_kernel(); \
1851 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1852+ pax_close_kernel(); \
1853 flush_icache_range((unsigned long)(dest_buf), \
1854 (unsigned long)(dest_buf) + (size)); \
1855 \
1856diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1857index 53e69da..3fdc896 100644
1858--- a/arch/arm/include/asm/futex.h
1859+++ b/arch/arm/include/asm/futex.h
1860@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1861 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1862 return -EFAULT;
1863
1864+ pax_open_userland();
1865+
1866 smp_mb();
1867 /* Prefetching cannot fault */
1868 prefetchw(uaddr);
1869@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1870 : "cc", "memory");
1871 smp_mb();
1872
1873+ pax_close_userland();
1874+
1875 *uval = val;
1876 return ret;
1877 }
1878@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1879 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1880 return -EFAULT;
1881
1882+ pax_open_userland();
1883+
1884 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1885 "1: " TUSER(ldr) " %1, [%4]\n"
1886 " teq %1, %2\n"
1887@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1888 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1889 : "cc", "memory");
1890
1891+ pax_close_userland();
1892+
1893 *uval = val;
1894 return ret;
1895 }
1896@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1897 return -EFAULT;
1898
1899 pagefault_disable(); /* implies preempt_disable() */
1900+ pax_open_userland();
1901
1902 switch (op) {
1903 case FUTEX_OP_SET:
1904@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1905 ret = -ENOSYS;
1906 }
1907
1908+ pax_close_userland();
1909 pagefault_enable(); /* subsumes preempt_enable() */
1910
1911 if (!ret) {
1912diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1913index 83eb2f7..ed77159 100644
1914--- a/arch/arm/include/asm/kmap_types.h
1915+++ b/arch/arm/include/asm/kmap_types.h
1916@@ -4,6 +4,6 @@
1917 /*
1918 * This is the "bare minimum". AIO seems to require this.
1919 */
1920-#define KM_TYPE_NR 16
1921+#define KM_TYPE_NR 17
1922
1923 #endif
1924diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1925index 9e614a1..3302cca 100644
1926--- a/arch/arm/include/asm/mach/dma.h
1927+++ b/arch/arm/include/asm/mach/dma.h
1928@@ -22,7 +22,7 @@ struct dma_ops {
1929 int (*residue)(unsigned int, dma_t *); /* optional */
1930 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1931 const char *type;
1932-};
1933+} __do_const;
1934
1935 struct dma_struct {
1936 void *addr; /* single DMA address */
1937diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1938index f98c7f3..e5c626d 100644
1939--- a/arch/arm/include/asm/mach/map.h
1940+++ b/arch/arm/include/asm/mach/map.h
1941@@ -23,17 +23,19 @@ struct map_desc {
1942
1943 /* types 0-3 are defined in asm/io.h */
1944 enum {
1945- MT_UNCACHED = 4,
1946- MT_CACHECLEAN,
1947- MT_MINICLEAN,
1948+ MT_UNCACHED_RW = 4,
1949+ MT_CACHECLEAN_RO,
1950+ MT_MINICLEAN_RO,
1951 MT_LOW_VECTORS,
1952 MT_HIGH_VECTORS,
1953- MT_MEMORY_RWX,
1954+ __MT_MEMORY_RWX,
1955 MT_MEMORY_RW,
1956- MT_ROM,
1957- MT_MEMORY_RWX_NONCACHED,
1958+ MT_MEMORY_RX,
1959+ MT_ROM_RX,
1960+ MT_MEMORY_RW_NONCACHED,
1961+ MT_MEMORY_RX_NONCACHED,
1962 MT_MEMORY_RW_DTCM,
1963- MT_MEMORY_RWX_ITCM,
1964+ MT_MEMORY_RX_ITCM,
1965 MT_MEMORY_RW_SO,
1966 MT_MEMORY_DMA_READY,
1967 };
1968diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1969index 891a56b..48f337e 100644
1970--- a/arch/arm/include/asm/outercache.h
1971+++ b/arch/arm/include/asm/outercache.h
1972@@ -36,7 +36,7 @@ struct outer_cache_fns {
1973
1974 /* This is an ARM L2C thing */
1975 void (*write_sec)(unsigned long, unsigned);
1976-};
1977+} __no_const;
1978
1979 extern struct outer_cache_fns outer_cache;
1980
1981diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1982index 4355f0e..cd9168e 100644
1983--- a/arch/arm/include/asm/page.h
1984+++ b/arch/arm/include/asm/page.h
1985@@ -23,6 +23,7 @@
1986
1987 #else
1988
1989+#include <linux/compiler.h>
1990 #include <asm/glue.h>
1991
1992 /*
1993@@ -114,7 +115,7 @@ struct cpu_user_fns {
1994 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1995 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1996 unsigned long vaddr, struct vm_area_struct *vma);
1997-};
1998+} __no_const;
1999
2000 #ifdef MULTI_USER
2001 extern struct cpu_user_fns cpu_user;
2002diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2003index 78a7793..e3dc06c 100644
2004--- a/arch/arm/include/asm/pgalloc.h
2005+++ b/arch/arm/include/asm/pgalloc.h
2006@@ -17,6 +17,7 @@
2007 #include <asm/processor.h>
2008 #include <asm/cacheflush.h>
2009 #include <asm/tlbflush.h>
2010+#include <asm/system_info.h>
2011
2012 #define check_pgt_cache() do { } while (0)
2013
2014@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2015 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2016 }
2017
2018+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2019+{
2020+ pud_populate(mm, pud, pmd);
2021+}
2022+
2023 #else /* !CONFIG_ARM_LPAE */
2024
2025 /*
2026@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2027 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2028 #define pmd_free(mm, pmd) do { } while (0)
2029 #define pud_populate(mm,pmd,pte) BUG()
2030+#define pud_populate_kernel(mm,pmd,pte) BUG()
2031
2032 #endif /* CONFIG_ARM_LPAE */
2033
2034@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2035 __free_page(pte);
2036 }
2037
2038+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2039+{
2040+#ifdef CONFIG_ARM_LPAE
2041+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2042+#else
2043+ if (addr & SECTION_SIZE)
2044+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2045+ else
2046+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2047+#endif
2048+ flush_pmd_entry(pmdp);
2049+}
2050+
2051 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2052 pmdval_t prot)
2053 {
2054@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2055 static inline void
2056 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2057 {
2058- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2059+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2060 }
2061 #define pmd_pgtable(pmd) pmd_page(pmd)
2062
2063diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2064index 5cfba15..f415e1a 100644
2065--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2066+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2067@@ -20,12 +20,15 @@
2068 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2069 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2070 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2071+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2072 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2073 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2074 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2075+
2076 /*
2077 * - section
2078 */
2079+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2080 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2081 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2082 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2083@@ -37,6 +40,7 @@
2084 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2085 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2086 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2087+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2088
2089 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2090 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2091@@ -66,6 +70,7 @@
2092 * - extended small page/tiny page
2093 */
2094 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2095+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2096 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2097 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2098 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2099diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2100index 219ac88..73ec32a 100644
2101--- a/arch/arm/include/asm/pgtable-2level.h
2102+++ b/arch/arm/include/asm/pgtable-2level.h
2103@@ -126,6 +126,9 @@
2104 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2105 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2106
2107+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2108+#define L_PTE_PXN (_AT(pteval_t, 0))
2109+
2110 /*
2111 * These are the memory types, defined to be compatible with
2112 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2113diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2114index 9fd61c7..f8f1cff 100644
2115--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2116+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2117@@ -76,6 +76,7 @@
2118 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2119 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2120 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2121+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2122 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2123
2124 /*
2125diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2126index 06e0bc0..c65bca8 100644
2127--- a/arch/arm/include/asm/pgtable-3level.h
2128+++ b/arch/arm/include/asm/pgtable-3level.h
2129@@ -81,6 +81,7 @@
2130 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2131 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2132 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2133+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2134 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2135 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2136 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2137@@ -92,10 +93,12 @@
2138 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2139 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2140 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2141+#define PMD_SECT_RDONLY PMD_SECT_AP2
2142
2143 /*
2144 * To be used in assembly code with the upper page attributes.
2145 */
2146+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2147 #define L_PTE_XN_HIGH (1 << (54 - 32))
2148 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2149
2150diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2151index 01baef0..73c156e 100644
2152--- a/arch/arm/include/asm/pgtable.h
2153+++ b/arch/arm/include/asm/pgtable.h
2154@@ -33,6 +33,9 @@
2155 #include <asm/pgtable-2level.h>
2156 #endif
2157
2158+#define ktla_ktva(addr) (addr)
2159+#define ktva_ktla(addr) (addr)
2160+
2161 /*
2162 * Just any arbitrary offset to the start of the vmalloc VM area: the
2163 * current 8MB value just means that there will be a 8MB "hole" after the
2164@@ -48,6 +51,9 @@
2165 #define LIBRARY_TEXT_START 0x0c000000
2166
2167 #ifndef __ASSEMBLY__
2168+extern pteval_t __supported_pte_mask;
2169+extern pmdval_t __supported_pmd_mask;
2170+
2171 extern void __pte_error(const char *file, int line, pte_t);
2172 extern void __pmd_error(const char *file, int line, pmd_t);
2173 extern void __pgd_error(const char *file, int line, pgd_t);
2174@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2175 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2176 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2177
2178+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2179+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2180+
2181+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2182+#include <asm/domain.h>
2183+#include <linux/thread_info.h>
2184+#include <linux/preempt.h>
2185+
2186+static inline int test_domain(int domain, int domaintype)
2187+{
2188+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2189+}
2190+#endif
2191+
2192+#ifdef CONFIG_PAX_KERNEXEC
2193+static inline unsigned long pax_open_kernel(void) {
2194+#ifdef CONFIG_ARM_LPAE
2195+ /* TODO */
2196+#else
2197+ preempt_disable();
2198+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2199+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2200+#endif
2201+ return 0;
2202+}
2203+
2204+static inline unsigned long pax_close_kernel(void) {
2205+#ifdef CONFIG_ARM_LPAE
2206+ /* TODO */
2207+#else
2208+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2209+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2210+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2211+ preempt_enable_no_resched();
2212+#endif
2213+ return 0;
2214+}
2215+#else
2216+static inline unsigned long pax_open_kernel(void) { return 0; }
2217+static inline unsigned long pax_close_kernel(void) { return 0; }
2218+#endif
2219+
2220 /*
2221 * This is the lowest virtual address we can permit any user space
2222 * mapping to be mapped at. This is particularly important for
2223@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2224 /*
2225 * The pgprot_* and protection_map entries will be fixed up in runtime
2226 * to include the cachable and bufferable bits based on memory policy,
2227- * as well as any architecture dependent bits like global/ASID and SMP
2228- * shared mapping bits.
2229+ * as well as any architecture dependent bits like global/ASID, PXN,
2230+ * and SMP shared mapping bits.
2231 */
2232 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2233
2234@@ -269,7 +317,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2235 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2236 {
2237 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2238- L_PTE_NONE | L_PTE_VALID;
2239+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2240 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2241 return pte;
2242 }
2243diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2244index c25ef3e..735f14b 100644
2245--- a/arch/arm/include/asm/psci.h
2246+++ b/arch/arm/include/asm/psci.h
2247@@ -32,7 +32,7 @@ struct psci_operations {
2248 int (*affinity_info)(unsigned long target_affinity,
2249 unsigned long lowest_affinity_level);
2250 int (*migrate_info_type)(void);
2251-};
2252+} __no_const;
2253
2254 extern struct psci_operations psci_ops;
2255 extern struct smp_operations psci_smp_ops;
2256diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2257index 2ec765c..beb1fe16 100644
2258--- a/arch/arm/include/asm/smp.h
2259+++ b/arch/arm/include/asm/smp.h
2260@@ -113,7 +113,7 @@ struct smp_operations {
2261 int (*cpu_disable)(unsigned int cpu);
2262 #endif
2263 #endif
2264-};
2265+} __no_const;
2266
2267 struct of_cpu_method {
2268 const char *method;
2269diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2270index fc44d37..acc63c4 100644
2271--- a/arch/arm/include/asm/thread_info.h
2272+++ b/arch/arm/include/asm/thread_info.h
2273@@ -89,9 +89,9 @@ struct thread_info {
2274 .flags = 0, \
2275 .preempt_count = INIT_PREEMPT_COUNT, \
2276 .addr_limit = KERNEL_DS, \
2277- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2278- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2279- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2280+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2281+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2282+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2283 .restart_block = { \
2284 .fn = do_no_restart_syscall, \
2285 }, \
2286@@ -165,7 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2287 #define TIF_SYSCALL_AUDIT 9
2288 #define TIF_SYSCALL_TRACEPOINT 10
2289 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2290-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2291+/* within 8 bits of TIF_SYSCALL_TRACE
2292+ * to meet flexible second operand requirements
2293+ */
2294+#define TIF_GRSEC_SETXID 12
2295+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2296 #define TIF_USING_IWMMXT 17
2297 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2298 #define TIF_RESTORE_SIGMASK 20
2299@@ -179,10 +183,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2300 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2301 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2302 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2303+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2304
2305 /* Checks for any syscall work in entry-common.S */
2306 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2307- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2308+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2309
2310 /*
2311 * Change these and you break ASM code in entry-common.S
2312diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2313index 5f833f7..76e6644 100644
2314--- a/arch/arm/include/asm/tls.h
2315+++ b/arch/arm/include/asm/tls.h
2316@@ -3,6 +3,7 @@
2317
2318 #include <linux/compiler.h>
2319 #include <asm/thread_info.h>
2320+#include <asm/pgtable.h>
2321
2322 #ifdef __ASSEMBLY__
2323 #include <asm/asm-offsets.h>
2324@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2325 * at 0xffff0fe0 must be used instead. (see
2326 * entry-armv.S for details)
2327 */
2328+ pax_open_kernel();
2329 *((unsigned int *)0xffff0ff0) = val;
2330+ pax_close_kernel();
2331 #endif
2332 }
2333
2334diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2335index 4767eb9..bf00668 100644
2336--- a/arch/arm/include/asm/uaccess.h
2337+++ b/arch/arm/include/asm/uaccess.h
2338@@ -18,6 +18,7 @@
2339 #include <asm/domain.h>
2340 #include <asm/unified.h>
2341 #include <asm/compiler.h>
2342+#include <asm/pgtable.h>
2343
2344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2345 #include <asm-generic/uaccess-unaligned.h>
2346@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2347 static inline void set_fs(mm_segment_t fs)
2348 {
2349 current_thread_info()->addr_limit = fs;
2350- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2351+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2352 }
2353
2354 #define segment_eq(a,b) ((a) == (b))
2355
2356+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2357+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2358+
2359+static inline void pax_open_userland(void)
2360+{
2361+
2362+#ifdef CONFIG_PAX_MEMORY_UDEREF
2363+ if (segment_eq(get_fs(), USER_DS)) {
2364+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2365+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2366+ }
2367+#endif
2368+
2369+}
2370+
2371+static inline void pax_close_userland(void)
2372+{
2373+
2374+#ifdef CONFIG_PAX_MEMORY_UDEREF
2375+ if (segment_eq(get_fs(), USER_DS)) {
2376+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2377+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2378+ }
2379+#endif
2380+
2381+}
2382+
2383 #define __addr_ok(addr) ({ \
2384 unsigned long flag; \
2385 __asm__("cmp %2, %0; movlo %0, #0" \
2386@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2387
2388 #define get_user(x,p) \
2389 ({ \
2390+ int __e; \
2391 might_fault(); \
2392- __get_user_check(x,p); \
2393+ pax_open_userland(); \
2394+ __e = __get_user_check(x,p); \
2395+ pax_close_userland(); \
2396+ __e; \
2397 })
2398
2399 extern int __put_user_1(void *, unsigned int);
2400@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2401
2402 #define put_user(x,p) \
2403 ({ \
2404+ int __e; \
2405 might_fault(); \
2406- __put_user_check(x,p); \
2407+ pax_open_userland(); \
2408+ __e = __put_user_check(x,p); \
2409+ pax_close_userland(); \
2410+ __e; \
2411 })
2412
2413 #else /* CONFIG_MMU */
2414@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2415
2416 #endif /* CONFIG_MMU */
2417
2418+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2419 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2420
2421 #define user_addr_max() \
2422@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2423 #define __get_user(x,ptr) \
2424 ({ \
2425 long __gu_err = 0; \
2426+ pax_open_userland(); \
2427 __get_user_err((x),(ptr),__gu_err); \
2428+ pax_close_userland(); \
2429 __gu_err; \
2430 })
2431
2432 #define __get_user_error(x,ptr,err) \
2433 ({ \
2434+ pax_open_userland(); \
2435 __get_user_err((x),(ptr),err); \
2436+ pax_close_userland(); \
2437 (void) 0; \
2438 })
2439
2440@@ -368,13 +409,17 @@ do { \
2441 #define __put_user(x,ptr) \
2442 ({ \
2443 long __pu_err = 0; \
2444+ pax_open_userland(); \
2445 __put_user_err((x),(ptr),__pu_err); \
2446+ pax_close_userland(); \
2447 __pu_err; \
2448 })
2449
2450 #define __put_user_error(x,ptr,err) \
2451 ({ \
2452+ pax_open_userland(); \
2453 __put_user_err((x),(ptr),err); \
2454+ pax_close_userland(); \
2455 (void) 0; \
2456 })
2457
2458@@ -474,11 +519,44 @@ do { \
2459
2460
2461 #ifdef CONFIG_MMU
2462-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2463-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2464+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2465+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2466+
2467+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2468+{
2469+ unsigned long ret;
2470+
2471+ check_object_size(to, n, false);
2472+ pax_open_userland();
2473+ ret = ___copy_from_user(to, from, n);
2474+ pax_close_userland();
2475+ return ret;
2476+}
2477+
2478+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2479+{
2480+ unsigned long ret;
2481+
2482+ check_object_size(from, n, true);
2483+ pax_open_userland();
2484+ ret = ___copy_to_user(to, from, n);
2485+ pax_close_userland();
2486+ return ret;
2487+}
2488+
2489 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2490-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2491+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2492 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2493+
2494+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2495+{
2496+ unsigned long ret;
2497+ pax_open_userland();
2498+ ret = ___clear_user(addr, n);
2499+ pax_close_userland();
2500+ return ret;
2501+}
2502+
2503 #else
2504 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2505 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2506@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2507
2508 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2509 {
2510+ if ((long)n < 0)
2511+ return n;
2512+
2513 if (access_ok(VERIFY_READ, from, n))
2514 n = __copy_from_user(to, from, n);
2515 else /* security hole - plug it */
2516@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2517
2518 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2519 {
2520+ if ((long)n < 0)
2521+ return n;
2522+
2523 if (access_ok(VERIFY_WRITE, to, n))
2524 n = __copy_to_user(to, from, n);
2525 return n;
2526diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2527index 5af0ed1..cea83883 100644
2528--- a/arch/arm/include/uapi/asm/ptrace.h
2529+++ b/arch/arm/include/uapi/asm/ptrace.h
2530@@ -92,7 +92,7 @@
2531 * ARMv7 groups of PSR bits
2532 */
2533 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2534-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2535+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2536 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2537 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2538
2539diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2540index a88671c..1cc895e 100644
2541--- a/arch/arm/kernel/armksyms.c
2542+++ b/arch/arm/kernel/armksyms.c
2543@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2544
2545 /* networking */
2546 EXPORT_SYMBOL(csum_partial);
2547-EXPORT_SYMBOL(csum_partial_copy_from_user);
2548+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2549 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2550 EXPORT_SYMBOL(__csum_ipv6_magic);
2551
2552@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2553 #ifdef CONFIG_MMU
2554 EXPORT_SYMBOL(copy_page);
2555
2556-EXPORT_SYMBOL(__copy_from_user);
2557-EXPORT_SYMBOL(__copy_to_user);
2558-EXPORT_SYMBOL(__clear_user);
2559+EXPORT_SYMBOL(___copy_from_user);
2560+EXPORT_SYMBOL(___copy_to_user);
2561+EXPORT_SYMBOL(___clear_user);
2562
2563 EXPORT_SYMBOL(__get_user_1);
2564 EXPORT_SYMBOL(__get_user_2);
2565diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2566index 36276cd..9d7b13b 100644
2567--- a/arch/arm/kernel/entry-armv.S
2568+++ b/arch/arm/kernel/entry-armv.S
2569@@ -47,6 +47,87 @@
2570 9997:
2571 .endm
2572
2573+ .macro pax_enter_kernel
2574+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2575+ @ make aligned space for saved DACR
2576+ sub sp, sp, #8
2577+ @ save regs
2578+ stmdb sp!, {r1, r2}
2579+ @ read DACR from cpu_domain into r1
2580+ mov r2, sp
2581+ @ assume 8K pages, since we have to split the immediate in two
2582+ bic r2, r2, #(0x1fc0)
2583+ bic r2, r2, #(0x3f)
2584+ ldr r1, [r2, #TI_CPU_DOMAIN]
2585+ @ store old DACR on stack
2586+ str r1, [sp, #8]
2587+#ifdef CONFIG_PAX_KERNEXEC
2588+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2589+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2590+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2591+#endif
2592+#ifdef CONFIG_PAX_MEMORY_UDEREF
2593+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2594+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2595+#endif
2596+ @ write r1 to current_thread_info()->cpu_domain
2597+ str r1, [r2, #TI_CPU_DOMAIN]
2598+ @ write r1 to DACR
2599+ mcr p15, 0, r1, c3, c0, 0
2600+ @ instruction sync
2601+ instr_sync
2602+ @ restore regs
2603+ ldmia sp!, {r1, r2}
2604+#endif
2605+ .endm
2606+
2607+ .macro pax_open_userland
2608+#ifdef CONFIG_PAX_MEMORY_UDEREF
2609+ @ save regs
2610+ stmdb sp!, {r0, r1}
2611+ @ read DACR from cpu_domain into r1
2612+ mov r0, sp
2613+ @ assume 8K pages, since we have to split the immediate in two
2614+ bic r0, r0, #(0x1fc0)
2615+ bic r0, r0, #(0x3f)
2616+ ldr r1, [r0, #TI_CPU_DOMAIN]
2617+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2618+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2619+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2620+ @ write r1 to current_thread_info()->cpu_domain
2621+ str r1, [r0, #TI_CPU_DOMAIN]
2622+ @ write r1 to DACR
2623+ mcr p15, 0, r1, c3, c0, 0
2624+ @ instruction sync
2625+ instr_sync
2626+ @ restore regs
2627+ ldmia sp!, {r0, r1}
2628+#endif
2629+ .endm
2630+
2631+ .macro pax_close_userland
2632+#ifdef CONFIG_PAX_MEMORY_UDEREF
2633+ @ save regs
2634+ stmdb sp!, {r0, r1}
2635+ @ read DACR from cpu_domain into r1
2636+ mov r0, sp
2637+ @ assume 8K pages, since we have to split the immediate in two
2638+ bic r0, r0, #(0x1fc0)
2639+ bic r0, r0, #(0x3f)
2640+ ldr r1, [r0, #TI_CPU_DOMAIN]
2641+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2642+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2643+ @ write r1 to current_thread_info()->cpu_domain
2644+ str r1, [r0, #TI_CPU_DOMAIN]
2645+ @ write r1 to DACR
2646+ mcr p15, 0, r1, c3, c0, 0
2647+ @ instruction sync
2648+ instr_sync
2649+ @ restore regs
2650+ ldmia sp!, {r0, r1}
2651+#endif
2652+ .endm
2653+
2654 .macro pabt_helper
2655 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2656 #ifdef MULTI_PABORT
2657@@ -89,11 +170,15 @@
2658 * Invalid mode handlers
2659 */
2660 .macro inv_entry, reason
2661+
2662+ pax_enter_kernel
2663+
2664 sub sp, sp, #S_FRAME_SIZE
2665 ARM( stmib sp, {r1 - lr} )
2666 THUMB( stmia sp, {r0 - r12} )
2667 THUMB( str sp, [sp, #S_SP] )
2668 THUMB( str lr, [sp, #S_LR] )
2669+
2670 mov r1, #\reason
2671 .endm
2672
2673@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2674 .macro svc_entry, stack_hole=0
2675 UNWIND(.fnstart )
2676 UNWIND(.save {r0 - pc} )
2677+
2678+ pax_enter_kernel
2679+
2680 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2681+
2682 #ifdef CONFIG_THUMB2_KERNEL
2683 SPFIX( str r0, [sp] ) @ temporarily saved
2684 SPFIX( mov r0, sp )
2685@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2686 ldmia r0, {r3 - r5}
2687 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2688 mov r6, #-1 @ "" "" "" ""
2689+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2690+ @ offset sp by 8 as done in pax_enter_kernel
2691+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2692+#else
2693 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2694+#endif
2695 SPFIX( addeq r2, r2, #4 )
2696 str r3, [sp, #-4]! @ save the "real" r0 copied
2697 @ from the exception stack
2698@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2699 .macro usr_entry
2700 UNWIND(.fnstart )
2701 UNWIND(.cantunwind ) @ don't unwind the user space
2702+
2703+ pax_enter_kernel_user
2704+
2705 sub sp, sp, #S_FRAME_SIZE
2706 ARM( stmib sp, {r1 - r12} )
2707 THUMB( stmia sp, {r0 - r12} )
2708@@ -421,7 +518,9 @@ __und_usr:
2709 tst r3, #PSR_T_BIT @ Thumb mode?
2710 bne __und_usr_thumb
2711 sub r4, r2, #4 @ ARM instr at LR - 4
2712+ pax_open_userland
2713 1: ldrt r0, [r4]
2714+ pax_close_userland
2715 ARM_BE8(rev r0, r0) @ little endian instruction
2716
2717 @ r0 = 32-bit ARM instruction which caused the exception
2718@@ -455,11 +554,15 @@ __und_usr_thumb:
2719 */
2720 .arch armv6t2
2721 #endif
2722+ pax_open_userland
2723 2: ldrht r5, [r4]
2724+ pax_close_userland
2725 ARM_BE8(rev16 r5, r5) @ little endian instruction
2726 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2727 blo __und_usr_fault_16 @ 16bit undefined instruction
2728+ pax_open_userland
2729 3: ldrht r0, [r2]
2730+ pax_close_userland
2731 ARM_BE8(rev16 r0, r0) @ little endian instruction
2732 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2733 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2734@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
2735 */
2736 .pushsection .fixup, "ax"
2737 .align 2
2738-4: str r4, [sp, #S_PC] @ retry current instruction
2739+4: pax_close_userland
2740+ str r4, [sp, #S_PC] @ retry current instruction
2741 ret r9
2742 .popsection
2743 .pushsection __ex_table,"a"
2744@@ -698,7 +802,7 @@ ENTRY(__switch_to)
2745 THUMB( str lr, [ip], #4 )
2746 ldr r4, [r2, #TI_TP_VALUE]
2747 ldr r5, [r2, #TI_TP_VALUE + 4]
2748-#ifdef CONFIG_CPU_USE_DOMAINS
2749+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2750 ldr r6, [r2, #TI_CPU_DOMAIN]
2751 #endif
2752 switch_tls r1, r4, r5, r3, r7
2753@@ -707,7 +811,7 @@ ENTRY(__switch_to)
2754 ldr r8, =__stack_chk_guard
2755 ldr r7, [r7, #TSK_STACK_CANARY]
2756 #endif
2757-#ifdef CONFIG_CPU_USE_DOMAINS
2758+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2759 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2760 #endif
2761 mov r5, r0
2762diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2763index e52fe5a..1b0a924 100644
2764--- a/arch/arm/kernel/entry-common.S
2765+++ b/arch/arm/kernel/entry-common.S
2766@@ -11,18 +11,46 @@
2767 #include <asm/assembler.h>
2768 #include <asm/unistd.h>
2769 #include <asm/ftrace.h>
2770+#include <asm/domain.h>
2771 #include <asm/unwind.h>
2772
2773+#include "entry-header.S"
2774+
2775 #ifdef CONFIG_NEED_RET_TO_USER
2776 #include <mach/entry-macro.S>
2777 #else
2778 .macro arch_ret_to_user, tmp1, tmp2
2779+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2780+ @ save regs
2781+ stmdb sp!, {r1, r2}
2782+ @ read DACR from cpu_domain into r1
2783+ mov r2, sp
2784+ @ assume 8K pages, since we have to split the immediate in two
2785+ bic r2, r2, #(0x1fc0)
2786+ bic r2, r2, #(0x3f)
2787+ ldr r1, [r2, #TI_CPU_DOMAIN]
2788+#ifdef CONFIG_PAX_KERNEXEC
2789+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2790+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2791+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2792+#endif
2793+#ifdef CONFIG_PAX_MEMORY_UDEREF
2794+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2795+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2796+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2797+#endif
2798+ @ write r1 to current_thread_info()->cpu_domain
2799+ str r1, [r2, #TI_CPU_DOMAIN]
2800+ @ write r1 to DACR
2801+ mcr p15, 0, r1, c3, c0, 0
2802+ @ instruction sync
2803+ instr_sync
2804+ @ restore regs
2805+ ldmia sp!, {r1, r2}
2806+#endif
2807 .endm
2808 #endif
2809
2810-#include "entry-header.S"
2811-
2812-
2813 .align 5
2814 /*
2815 * This is the fast syscall return path. We do as little as
2816@@ -406,6 +434,12 @@ ENTRY(vector_swi)
2817 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2818 #endif
2819
2820+ /*
2821+ * do this here to avoid a performance hit of wrapping the code above
2822+ * that directly dereferences userland to parse the SWI instruction
2823+ */
2824+ pax_enter_kernel_user
2825+
2826 adr tbl, sys_call_table @ load syscall table pointer
2827
2828 #if defined(CONFIG_OABI_COMPAT)
2829diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2830index 2fdf867..6e909e4 100644
2831--- a/arch/arm/kernel/entry-header.S
2832+++ b/arch/arm/kernel/entry-header.S
2833@@ -188,6 +188,60 @@
2834 msr cpsr_c, \rtemp @ switch back to the SVC mode
2835 .endm
2836
2837+ .macro pax_enter_kernel_user
2838+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2839+ @ save regs
2840+ stmdb sp!, {r0, r1}
2841+ @ read DACR from cpu_domain into r1
2842+ mov r0, sp
2843+ @ assume 8K pages, since we have to split the immediate in two
2844+ bic r0, r0, #(0x1fc0)
2845+ bic r0, r0, #(0x3f)
2846+ ldr r1, [r0, #TI_CPU_DOMAIN]
2847+#ifdef CONFIG_PAX_MEMORY_UDEREF
2848+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2849+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2850+#endif
2851+#ifdef CONFIG_PAX_KERNEXEC
2852+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2853+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2854+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2855+#endif
2856+ @ write r1 to current_thread_info()->cpu_domain
2857+ str r1, [r0, #TI_CPU_DOMAIN]
2858+ @ write r1 to DACR
2859+ mcr p15, 0, r1, c3, c0, 0
2860+ @ instruction sync
2861+ instr_sync
2862+ @ restore regs
2863+ ldmia sp!, {r0, r1}
2864+#endif
2865+ .endm
2866+
2867+ .macro pax_exit_kernel
2868+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2869+ @ save regs
2870+ stmdb sp!, {r0, r1}
2871+ @ read old DACR from stack into r1
2872+ ldr r1, [sp, #(8 + S_SP)]
2873+ sub r1, r1, #8
2874+ ldr r1, [r1]
2875+
2876+ @ write r1 to current_thread_info()->cpu_domain
2877+ mov r0, sp
2878+ @ assume 8K pages, since we have to split the immediate in two
2879+ bic r0, r0, #(0x1fc0)
2880+ bic r0, r0, #(0x3f)
2881+ str r1, [r0, #TI_CPU_DOMAIN]
2882+ @ write r1 to DACR
2883+ mcr p15, 0, r1, c3, c0, 0
2884+ @ instruction sync
2885+ instr_sync
2886+ @ restore regs
2887+ ldmia sp!, {r0, r1}
2888+#endif
2889+ .endm
2890+
2891 #ifndef CONFIG_THUMB2_KERNEL
2892 .macro svc_exit, rpsr, irq = 0
2893 .if \irq != 0
2894@@ -207,6 +261,9 @@
2895 blne trace_hardirqs_off
2896 #endif
2897 .endif
2898+
2899+ pax_exit_kernel
2900+
2901 msr spsr_cxsf, \rpsr
2902 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2903 @ We must avoid clrex due to Cortex-A15 erratum #830321
2904@@ -254,6 +311,9 @@
2905 blne trace_hardirqs_off
2906 #endif
2907 .endif
2908+
2909+ pax_exit_kernel
2910+
2911 ldr lr, [sp, #S_SP] @ top of the stack
2912 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2913
2914diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2915index 918875d..cd5fa27 100644
2916--- a/arch/arm/kernel/fiq.c
2917+++ b/arch/arm/kernel/fiq.c
2918@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2919 void *base = vectors_page;
2920 unsigned offset = FIQ_OFFSET;
2921
2922+ pax_open_kernel();
2923 memcpy(base + offset, start, length);
2924+ pax_close_kernel();
2925+
2926 if (!cache_is_vipt_nonaliasing())
2927 flush_icache_range((unsigned long)base + offset, offset +
2928 length);
2929diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2930index 664eee8..f470938 100644
2931--- a/arch/arm/kernel/head.S
2932+++ b/arch/arm/kernel/head.S
2933@@ -437,7 +437,7 @@ __enable_mmu:
2934 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2935 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2936 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2937- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2938+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2939 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2940 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2941 #endif
2942diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2943index 6a4dffe..4a86a70 100644
2944--- a/arch/arm/kernel/module.c
2945+++ b/arch/arm/kernel/module.c
2946@@ -38,12 +38,39 @@
2947 #endif
2948
2949 #ifdef CONFIG_MMU
2950-void *module_alloc(unsigned long size)
2951+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2952 {
2953+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2954+ return NULL;
2955 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2956- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2957+ GFP_KERNEL, prot, NUMA_NO_NODE,
2958 __builtin_return_address(0));
2959 }
2960+
2961+void *module_alloc(unsigned long size)
2962+{
2963+
2964+#ifdef CONFIG_PAX_KERNEXEC
2965+ return __module_alloc(size, PAGE_KERNEL);
2966+#else
2967+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2968+#endif
2969+
2970+}
2971+
2972+#ifdef CONFIG_PAX_KERNEXEC
2973+void module_free_exec(struct module *mod, void *module_region)
2974+{
2975+ module_free(mod, module_region);
2976+}
2977+EXPORT_SYMBOL(module_free_exec);
2978+
2979+void *module_alloc_exec(unsigned long size)
2980+{
2981+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2982+}
2983+EXPORT_SYMBOL(module_alloc_exec);
2984+#endif
2985 #endif
2986
2987 int
2988diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2989index 07314af..c46655c 100644
2990--- a/arch/arm/kernel/patch.c
2991+++ b/arch/arm/kernel/patch.c
2992@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2993 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2994 int size;
2995
2996+ pax_open_kernel();
2997 if (thumb2 && __opcode_is_thumb16(insn)) {
2998 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2999 size = sizeof(u16);
3000@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3001 *(u32 *)addr = insn;
3002 size = sizeof(u32);
3003 }
3004+ pax_close_kernel();
3005
3006 flush_icache_range((uintptr_t)(addr),
3007 (uintptr_t)(addr) + size);
3008diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3009index a35f6eb..7af43a0 100644
3010--- a/arch/arm/kernel/process.c
3011+++ b/arch/arm/kernel/process.c
3012@@ -212,6 +212,7 @@ void machine_power_off(void)
3013
3014 if (pm_power_off)
3015 pm_power_off();
3016+ BUG();
3017 }
3018
3019 /*
3020@@ -225,7 +226,7 @@ void machine_power_off(void)
3021 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3022 * to use. Implementing such co-ordination would be essentially impossible.
3023 */
3024-void machine_restart(char *cmd)
3025+__noreturn void machine_restart(char *cmd)
3026 {
3027 local_irq_disable();
3028 smp_send_stop();
3029@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3030
3031 show_regs_print_info(KERN_DEFAULT);
3032
3033- print_symbol("PC is at %s\n", instruction_pointer(regs));
3034- print_symbol("LR is at %s\n", regs->ARM_lr);
3035+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3036+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3037 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3038 "sp : %08lx ip : %08lx fp : %08lx\n",
3039 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3040@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
3041 return 0;
3042 }
3043
3044-unsigned long arch_randomize_brk(struct mm_struct *mm)
3045-{
3046- unsigned long range_end = mm->brk + 0x02000000;
3047- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3048-}
3049-
3050 #ifdef CONFIG_MMU
3051 #ifdef CONFIG_KUSER_HELPERS
3052 /*
3053@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
3054
3055 static int __init gate_vma_init(void)
3056 {
3057- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3058+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3059 return 0;
3060 }
3061 arch_initcall(gate_vma_init);
3062@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
3063
3064 const char *arch_vma_name(struct vm_area_struct *vma)
3065 {
3066- return is_gate_vma(vma) ? "[vectors]" :
3067- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3068- "[sigpage]" : NULL;
3069+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3070 }
3071
3072-static struct page *signal_page;
3073-extern struct page *get_signal_page(void);
3074-
3075 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3076 {
3077 struct mm_struct *mm = current->mm;
3078- unsigned long addr;
3079- int ret;
3080-
3081- if (!signal_page)
3082- signal_page = get_signal_page();
3083- if (!signal_page)
3084- return -ENOMEM;
3085
3086 down_write(&mm->mmap_sem);
3087- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3088- if (IS_ERR_VALUE(addr)) {
3089- ret = addr;
3090- goto up_fail;
3091- }
3092-
3093- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3094- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3095- &signal_page);
3096-
3097- if (ret == 0)
3098- mm->context.sigpage = addr;
3099-
3100- up_fail:
3101+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3102 up_write(&mm->mmap_sem);
3103- return ret;
3104+ return 0;
3105 }
3106 #endif
3107diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3108index f73891b..cf3004e 100644
3109--- a/arch/arm/kernel/psci.c
3110+++ b/arch/arm/kernel/psci.c
3111@@ -28,7 +28,7 @@
3112 #include <asm/psci.h>
3113 #include <asm/system_misc.h>
3114
3115-struct psci_operations psci_ops;
3116+struct psci_operations psci_ops __read_only;
3117
3118 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3119 typedef int (*psci_initcall_t)(const struct device_node *);
3120diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3121index 0c27ed6..b67388e 100644
3122--- a/arch/arm/kernel/ptrace.c
3123+++ b/arch/arm/kernel/ptrace.c
3124@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3125 regs->ARM_ip = ip;
3126 }
3127
3128+#ifdef CONFIG_GRKERNSEC_SETXID
3129+extern void gr_delayed_cred_worker(void);
3130+#endif
3131+
3132 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3133 {
3134 current_thread_info()->syscall = scno;
3135
3136+#ifdef CONFIG_GRKERNSEC_SETXID
3137+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3138+ gr_delayed_cred_worker();
3139+#endif
3140+
3141 /* Do the secure computing check first; failures should be fast. */
3142 if (secure_computing(scno) == -1)
3143 return -1;
3144diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3145index 84db893d..bd8213a 100644
3146--- a/arch/arm/kernel/setup.c
3147+++ b/arch/arm/kernel/setup.c
3148@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3149 unsigned int elf_hwcap2 __read_mostly;
3150 EXPORT_SYMBOL(elf_hwcap2);
3151
3152+pteval_t __supported_pte_mask __read_only;
3153+pmdval_t __supported_pmd_mask __read_only;
3154
3155 #ifdef MULTI_CPU
3156-struct processor processor __read_mostly;
3157+struct processor processor __read_only;
3158 #endif
3159 #ifdef MULTI_TLB
3160-struct cpu_tlb_fns cpu_tlb __read_mostly;
3161+struct cpu_tlb_fns cpu_tlb __read_only;
3162 #endif
3163 #ifdef MULTI_USER
3164-struct cpu_user_fns cpu_user __read_mostly;
3165+struct cpu_user_fns cpu_user __read_only;
3166 #endif
3167 #ifdef MULTI_CACHE
3168-struct cpu_cache_fns cpu_cache __read_mostly;
3169+struct cpu_cache_fns cpu_cache __read_only;
3170 #endif
3171 #ifdef CONFIG_OUTER_CACHE
3172-struct outer_cache_fns outer_cache __read_mostly;
3173+struct outer_cache_fns outer_cache __read_only;
3174 EXPORT_SYMBOL(outer_cache);
3175 #endif
3176
3177@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3178 asm("mrc p15, 0, %0, c0, c1, 4"
3179 : "=r" (mmfr0));
3180 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3181- (mmfr0 & 0x000000f0) >= 0x00000030)
3182+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3183 cpu_arch = CPU_ARCH_ARMv7;
3184- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3185+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3186+ __supported_pte_mask |= L_PTE_PXN;
3187+ __supported_pmd_mask |= PMD_PXNTABLE;
3188+ }
3189+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3190 (mmfr0 & 0x000000f0) == 0x00000020)
3191 cpu_arch = CPU_ARCH_ARMv6;
3192 else
3193diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3194index bd19834..e4d8c66 100644
3195--- a/arch/arm/kernel/signal.c
3196+++ b/arch/arm/kernel/signal.c
3197@@ -24,8 +24,6 @@
3198
3199 extern const unsigned long sigreturn_codes[7];
3200
3201-static unsigned long signal_return_offset;
3202-
3203 #ifdef CONFIG_CRUNCH
3204 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3205 {
3206@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3207 * except when the MPU has protected the vectors
3208 * page from PL0
3209 */
3210- retcode = mm->context.sigpage + signal_return_offset +
3211- (idx << 2) + thumb;
3212+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3213 } else
3214 #endif
3215 {
3216@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3217 } while (thread_flags & _TIF_WORK_MASK);
3218 return 0;
3219 }
3220-
3221-struct page *get_signal_page(void)
3222-{
3223- unsigned long ptr;
3224- unsigned offset;
3225- struct page *page;
3226- void *addr;
3227-
3228- page = alloc_pages(GFP_KERNEL, 0);
3229-
3230- if (!page)
3231- return NULL;
3232-
3233- addr = page_address(page);
3234-
3235- /* Give the signal return code some randomness */
3236- offset = 0x200 + (get_random_int() & 0x7fc);
3237- signal_return_offset = offset;
3238-
3239- /*
3240- * Copy signal return handlers into the vector page, and
3241- * set sigreturn to be a pointer to these.
3242- */
3243- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3244-
3245- ptr = (unsigned long)addr + offset;
3246- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3247-
3248- return page;
3249-}
3250diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3251index bbe22fc..d7737f5 100644
3252--- a/arch/arm/kernel/smp.c
3253+++ b/arch/arm/kernel/smp.c
3254@@ -76,7 +76,7 @@ enum ipi_msg_type {
3255
3256 static DECLARE_COMPLETION(cpu_running);
3257
3258-static struct smp_operations smp_ops;
3259+static struct smp_operations smp_ops __read_only;
3260
3261 void __init smp_set_ops(struct smp_operations *ops)
3262 {
3263diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3264index 7a3be1d..b00c7de 100644
3265--- a/arch/arm/kernel/tcm.c
3266+++ b/arch/arm/kernel/tcm.c
3267@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3268 .virtual = ITCM_OFFSET,
3269 .pfn = __phys_to_pfn(ITCM_OFFSET),
3270 .length = 0,
3271- .type = MT_MEMORY_RWX_ITCM,
3272+ .type = MT_MEMORY_RX_ITCM,
3273 }
3274 };
3275
3276@@ -267,7 +267,9 @@ no_dtcm:
3277 start = &__sitcm_text;
3278 end = &__eitcm_text;
3279 ram = &__itcm_start;
3280+ pax_open_kernel();
3281 memcpy(start, ram, itcm_code_sz);
3282+ pax_close_kernel();
3283 pr_debug("CPU ITCM: copied code from %p - %p\n",
3284 start, end);
3285 itcm_present = true;
3286diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3287index a964c9f..cf2a5b1 100644
3288--- a/arch/arm/kernel/traps.c
3289+++ b/arch/arm/kernel/traps.c
3290@@ -64,7 +64,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3291 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3292 {
3293 #ifdef CONFIG_KALLSYMS
3294- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3295+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3296 #else
3297 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3298 #endif
3299@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3300 static int die_owner = -1;
3301 static unsigned int die_nest_count;
3302
3303+extern void gr_handle_kernel_exploit(void);
3304+
3305 static unsigned long oops_begin(void)
3306 {
3307 int cpu;
3308@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3309 panic("Fatal exception in interrupt");
3310 if (panic_on_oops)
3311 panic("Fatal exception");
3312+
3313+ gr_handle_kernel_exploit();
3314+
3315 if (signr)
3316 do_exit(signr);
3317 }
3318@@ -887,7 +892,11 @@ void __init early_trap_init(void *vectors_base)
3319 kuser_init(vectors_base);
3320
3321 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3322- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3323+
3324+#ifndef CONFIG_PAX_MEMORY_UDEREF
3325+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3326+#endif
3327+
3328 #else /* ifndef CONFIG_CPU_V7M */
3329 /*
3330 * on V7-M there is no need to copy the vector table to a dedicated
3331diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3332index 6f57cb9..645f8c4 100644
3333--- a/arch/arm/kernel/vmlinux.lds.S
3334+++ b/arch/arm/kernel/vmlinux.lds.S
3335@@ -8,7 +8,11 @@
3336 #include <asm/thread_info.h>
3337 #include <asm/memory.h>
3338 #include <asm/page.h>
3339-
3340+
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+#include <asm/pgtable.h>
3343+#endif
3344+
3345 #define PROC_INFO \
3346 . = ALIGN(4); \
3347 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3348@@ -34,7 +38,7 @@
3349 #endif
3350
3351 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3352- defined(CONFIG_GENERIC_BUG)
3353+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3354 #define ARM_EXIT_KEEP(x) x
3355 #define ARM_EXIT_DISCARD(x)
3356 #else
3357@@ -90,6 +94,11 @@ SECTIONS
3358 _text = .;
3359 HEAD_TEXT
3360 }
3361+
3362+#ifdef CONFIG_PAX_KERNEXEC
3363+ . = ALIGN(1<<SECTION_SHIFT);
3364+#endif
3365+
3366 .text : { /* Real text segment */
3367 _stext = .; /* Text and read-only data */
3368 __exception_text_start = .;
3369@@ -112,6 +121,8 @@ SECTIONS
3370 ARM_CPU_KEEP(PROC_INFO)
3371 }
3372
3373+ _etext = .; /* End of text section */
3374+
3375 RO_DATA(PAGE_SIZE)
3376
3377 . = ALIGN(4);
3378@@ -142,7 +153,9 @@ SECTIONS
3379
3380 NOTES
3381
3382- _etext = .; /* End of text and rodata section */
3383+#ifdef CONFIG_PAX_KERNEXEC
3384+ . = ALIGN(1<<SECTION_SHIFT);
3385+#endif
3386
3387 #ifndef CONFIG_XIP_KERNEL
3388 . = ALIGN(PAGE_SIZE);
3389@@ -220,6 +233,11 @@ SECTIONS
3390 . = PAGE_OFFSET + TEXT_OFFSET;
3391 #else
3392 __init_end = .;
3393+
3394+#ifdef CONFIG_PAX_KERNEXEC
3395+ . = ALIGN(1<<SECTION_SHIFT);
3396+#endif
3397+
3398 . = ALIGN(THREAD_SIZE);
3399 __data_loc = .;
3400 #endif
3401diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3402index a99e0cd..ab56421d 100644
3403--- a/arch/arm/kvm/arm.c
3404+++ b/arch/arm/kvm/arm.c
3405@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3406 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3407
3408 /* The VMID used in the VTTBR */
3409-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3410+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3411 static u8 kvm_next_vmid;
3412 static DEFINE_SPINLOCK(kvm_vmid_lock);
3413
3414@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3415 */
3416 static bool need_new_vmid_gen(struct kvm *kvm)
3417 {
3418- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3419+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3420 }
3421
3422 /**
3423@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3424
3425 /* First user of a new VMID generation? */
3426 if (unlikely(kvm_next_vmid == 0)) {
3427- atomic64_inc(&kvm_vmid_gen);
3428+ atomic64_inc_unchecked(&kvm_vmid_gen);
3429 kvm_next_vmid = 1;
3430
3431 /*
3432@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3433 kvm_call_hyp(__kvm_flush_vm_context);
3434 }
3435
3436- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3437+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3438 kvm->arch.vmid = kvm_next_vmid;
3439 kvm_next_vmid++;
3440
3441@@ -997,7 +997,7 @@ static void check_kvm_target_cpu(void *ret)
3442 /**
3443 * Initialize Hyp-mode and memory mappings on all CPUs.
3444 */
3445-int kvm_arch_init(void *opaque)
3446+int kvm_arch_init(const void *opaque)
3447 {
3448 int err;
3449 int ret, cpu;
3450diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3451index 14a0d98..7771a7d 100644
3452--- a/arch/arm/lib/clear_user.S
3453+++ b/arch/arm/lib/clear_user.S
3454@@ -12,14 +12,14 @@
3455
3456 .text
3457
3458-/* Prototype: int __clear_user(void *addr, size_t sz)
3459+/* Prototype: int ___clear_user(void *addr, size_t sz)
3460 * Purpose : clear some user memory
3461 * Params : addr - user memory address to clear
3462 * : sz - number of bytes to clear
3463 * Returns : number of bytes NOT cleared
3464 */
3465 ENTRY(__clear_user_std)
3466-WEAK(__clear_user)
3467+WEAK(___clear_user)
3468 stmfd sp!, {r1, lr}
3469 mov r2, #0
3470 cmp r1, #4
3471@@ -44,7 +44,7 @@ WEAK(__clear_user)
3472 USER( strnebt r2, [r0])
3473 mov r0, #0
3474 ldmfd sp!, {r1, pc}
3475-ENDPROC(__clear_user)
3476+ENDPROC(___clear_user)
3477 ENDPROC(__clear_user_std)
3478
3479 .pushsection .fixup,"ax"
3480diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3481index 66a477a..bee61d3 100644
3482--- a/arch/arm/lib/copy_from_user.S
3483+++ b/arch/arm/lib/copy_from_user.S
3484@@ -16,7 +16,7 @@
3485 /*
3486 * Prototype:
3487 *
3488- * size_t __copy_from_user(void *to, const void *from, size_t n)
3489+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3490 *
3491 * Purpose:
3492 *
3493@@ -84,11 +84,11 @@
3494
3495 .text
3496
3497-ENTRY(__copy_from_user)
3498+ENTRY(___copy_from_user)
3499
3500 #include "copy_template.S"
3501
3502-ENDPROC(__copy_from_user)
3503+ENDPROC(___copy_from_user)
3504
3505 .pushsection .fixup,"ax"
3506 .align 0
3507diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3508index 6ee2f67..d1cce76 100644
3509--- a/arch/arm/lib/copy_page.S
3510+++ b/arch/arm/lib/copy_page.S
3511@@ -10,6 +10,7 @@
3512 * ASM optimised string functions
3513 */
3514 #include <linux/linkage.h>
3515+#include <linux/const.h>
3516 #include <asm/assembler.h>
3517 #include <asm/asm-offsets.h>
3518 #include <asm/cache.h>
3519diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3520index d066df6..df28194 100644
3521--- a/arch/arm/lib/copy_to_user.S
3522+++ b/arch/arm/lib/copy_to_user.S
3523@@ -16,7 +16,7 @@
3524 /*
3525 * Prototype:
3526 *
3527- * size_t __copy_to_user(void *to, const void *from, size_t n)
3528+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3529 *
3530 * Purpose:
3531 *
3532@@ -88,11 +88,11 @@
3533 .text
3534
3535 ENTRY(__copy_to_user_std)
3536-WEAK(__copy_to_user)
3537+WEAK(___copy_to_user)
3538
3539 #include "copy_template.S"
3540
3541-ENDPROC(__copy_to_user)
3542+ENDPROC(___copy_to_user)
3543 ENDPROC(__copy_to_user_std)
3544
3545 .pushsection .fixup,"ax"
3546diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3547index 7d08b43..f7ca7ea 100644
3548--- a/arch/arm/lib/csumpartialcopyuser.S
3549+++ b/arch/arm/lib/csumpartialcopyuser.S
3550@@ -57,8 +57,8 @@
3551 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3552 */
3553
3554-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3555-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3556+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3557+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3558
3559 #include "csumpartialcopygeneric.S"
3560
3561diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3562index 312d43e..21d2322 100644
3563--- a/arch/arm/lib/delay.c
3564+++ b/arch/arm/lib/delay.c
3565@@ -29,7 +29,7 @@
3566 /*
3567 * Default to the loop-based delay implementation.
3568 */
3569-struct arm_delay_ops arm_delay_ops = {
3570+struct arm_delay_ops arm_delay_ops __read_only = {
3571 .delay = __loop_delay,
3572 .const_udelay = __loop_const_udelay,
3573 .udelay = __loop_udelay,
3574diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3575index 3e58d71..029817c 100644
3576--- a/arch/arm/lib/uaccess_with_memcpy.c
3577+++ b/arch/arm/lib/uaccess_with_memcpy.c
3578@@ -136,7 +136,7 @@ out:
3579 }
3580
3581 unsigned long
3582-__copy_to_user(void __user *to, const void *from, unsigned long n)
3583+___copy_to_user(void __user *to, const void *from, unsigned long n)
3584 {
3585 /*
3586 * This test is stubbed out of the main function above to keep
3587@@ -190,7 +190,7 @@ out:
3588 return n;
3589 }
3590
3591-unsigned long __clear_user(void __user *addr, unsigned long n)
3592+unsigned long ___clear_user(void __user *addr, unsigned long n)
3593 {
3594 /* See rational for this in __copy_to_user() above. */
3595 if (n < 64)
3596diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3597index f7a07a5..258e1f7 100644
3598--- a/arch/arm/mach-at91/setup.c
3599+++ b/arch/arm/mach-at91/setup.c
3600@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3601
3602 desc->pfn = __phys_to_pfn(base);
3603 desc->length = length;
3604- desc->type = MT_MEMORY_RWX_NONCACHED;
3605+ desc->type = MT_MEMORY_RW_NONCACHED;
3606
3607 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3608 base, length, desc->virtual);
3609diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3610index 7f352de..6dc0929 100644
3611--- a/arch/arm/mach-keystone/keystone.c
3612+++ b/arch/arm/mach-keystone/keystone.c
3613@@ -27,7 +27,7 @@
3614
3615 #include "keystone.h"
3616
3617-static struct notifier_block platform_nb;
3618+static notifier_block_no_const platform_nb;
3619 static unsigned long keystone_dma_pfn_offset __read_mostly;
3620
3621 static int keystone_platform_notifier(struct notifier_block *nb,
3622diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3623index 2bdc323..cf1c607 100644
3624--- a/arch/arm/mach-mvebu/coherency.c
3625+++ b/arch/arm/mach-mvebu/coherency.c
3626@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3627
3628 /*
3629 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3630- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3631+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3632 * is needed as a workaround for a deadlock issue between the PCIe
3633 * interface and the cache controller.
3634 */
3635@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3636 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3637
3638 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3639- mtype = MT_UNCACHED;
3640+ mtype = MT_UNCACHED_RW;
3641
3642 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3643 }
3644diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3645index aead77a..a2253fa 100644
3646--- a/arch/arm/mach-omap2/board-n8x0.c
3647+++ b/arch/arm/mach-omap2/board-n8x0.c
3648@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3649 }
3650 #endif
3651
3652-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3653+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3654 .late_init = n8x0_menelaus_late_init,
3655 };
3656
3657diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3658index 2f97228..6ce10e1 100644
3659--- a/arch/arm/mach-omap2/gpmc.c
3660+++ b/arch/arm/mach-omap2/gpmc.c
3661@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
3662 };
3663
3664 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3665-static struct irq_chip gpmc_irq_chip;
3666 static int gpmc_irq_start;
3667
3668 static struct resource gpmc_mem_root;
3669@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3670
3671 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3672
3673+static struct irq_chip gpmc_irq_chip = {
3674+ .name = "gpmc",
3675+ .irq_startup = gpmc_irq_noop_ret,
3676+ .irq_enable = gpmc_irq_enable,
3677+ .irq_disable = gpmc_irq_disable,
3678+ .irq_shutdown = gpmc_irq_noop,
3679+ .irq_ack = gpmc_irq_noop,
3680+ .irq_mask = gpmc_irq_noop,
3681+ .irq_unmask = gpmc_irq_noop,
3682+
3683+};
3684+
3685 static int gpmc_setup_irq(void)
3686 {
3687 int i;
3688@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
3689 return gpmc_irq_start;
3690 }
3691
3692- gpmc_irq_chip.name = "gpmc";
3693- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3694- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3695- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3696- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3697- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3698- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3699- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3700-
3701 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3702 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3703
3704diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3705index 4001325..b14e2a0 100644
3706--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3707+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3708@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3709 int (*finish_suspend)(unsigned long cpu_state);
3710 void (*resume)(void);
3711 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3712-};
3713+} __no_const;
3714
3715 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3716 static struct powerdomain *mpuss_pd;
3717@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3718 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3719 {}
3720
3721-struct cpu_pm_ops omap_pm_ops = {
3722+static struct cpu_pm_ops omap_pm_ops __read_only = {
3723 .finish_suspend = default_finish_suspend,
3724 .resume = dummy_cpu_resume,
3725 .scu_prepare = dummy_scu_prepare,
3726diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3727index 37843a7..a98df13 100644
3728--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3729+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3730@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3731 return NOTIFY_OK;
3732 }
3733
3734-static struct notifier_block __refdata irq_hotplug_notifier = {
3735+static struct notifier_block irq_hotplug_notifier = {
3736 .notifier_call = irq_cpu_hotplug_notify,
3737 };
3738
3739diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3740index d22c30d..23697a1 100644
3741--- a/arch/arm/mach-omap2/omap_device.c
3742+++ b/arch/arm/mach-omap2/omap_device.c
3743@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3744 struct platform_device __init *omap_device_build(const char *pdev_name,
3745 int pdev_id,
3746 struct omap_hwmod *oh,
3747- void *pdata, int pdata_len)
3748+ const void *pdata, int pdata_len)
3749 {
3750 struct omap_hwmod *ohs[] = { oh };
3751
3752@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3753 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3754 int pdev_id,
3755 struct omap_hwmod **ohs,
3756- int oh_cnt, void *pdata,
3757+ int oh_cnt, const void *pdata,
3758 int pdata_len)
3759 {
3760 int ret = -ENOMEM;
3761diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3762index 78c02b3..c94109a 100644
3763--- a/arch/arm/mach-omap2/omap_device.h
3764+++ b/arch/arm/mach-omap2/omap_device.h
3765@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3766 /* Core code interface */
3767
3768 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3769- struct omap_hwmod *oh, void *pdata,
3770+ struct omap_hwmod *oh, const void *pdata,
3771 int pdata_len);
3772
3773 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3774 struct omap_hwmod **oh, int oh_cnt,
3775- void *pdata, int pdata_len);
3776+ const void *pdata, int pdata_len);
3777
3778 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3779 struct omap_hwmod **ohs, int oh_cnt);
3780diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3781index 9e91a4e..357ed0d 100644
3782--- a/arch/arm/mach-omap2/omap_hwmod.c
3783+++ b/arch/arm/mach-omap2/omap_hwmod.c
3784@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3785 int (*init_clkdm)(struct omap_hwmod *oh);
3786 void (*update_context_lost)(struct omap_hwmod *oh);
3787 int (*get_context_lost)(struct omap_hwmod *oh);
3788-};
3789+} __no_const;
3790
3791 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3792-static struct omap_hwmod_soc_ops soc_ops;
3793+static struct omap_hwmod_soc_ops soc_ops __read_only;
3794
3795 /* omap_hwmod_list contains all registered struct omap_hwmods */
3796 static LIST_HEAD(omap_hwmod_list);
3797diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3798index 95fee54..cfa9cf1 100644
3799--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3800+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3801@@ -10,6 +10,7 @@
3802
3803 #include <linux/kernel.h>
3804 #include <linux/init.h>
3805+#include <asm/pgtable.h>
3806
3807 #include "powerdomain.h"
3808
3809@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3810
3811 void __init am43xx_powerdomains_init(void)
3812 {
3813- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3814+ pax_open_kernel();
3815+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3816+ pax_close_kernel();
3817 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3818 pwrdm_register_pwrdms(powerdomains_am43xx);
3819 pwrdm_complete_init();
3820diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3821index 97d6607..8429d14 100644
3822--- a/arch/arm/mach-omap2/wd_timer.c
3823+++ b/arch/arm/mach-omap2/wd_timer.c
3824@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3825 struct omap_hwmod *oh;
3826 char *oh_name = "wd_timer2";
3827 char *dev_name = "omap_wdt";
3828- struct omap_wd_timer_platform_data pdata;
3829+ static struct omap_wd_timer_platform_data pdata = {
3830+ .read_reset_sources = prm_read_reset_sources
3831+ };
3832
3833 if (!cpu_class_is_omap2() || of_have_populated_dt())
3834 return 0;
3835@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3836 return -EINVAL;
3837 }
3838
3839- pdata.read_reset_sources = prm_read_reset_sources;
3840-
3841 pdev = omap_device_build(dev_name, id, oh, &pdata,
3842 sizeof(struct omap_wd_timer_platform_data));
3843 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3844diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3845index b30bf5c..d0825bf 100644
3846--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3847+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3848@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3849 bool entered_lp2 = false;
3850
3851 if (tegra_pending_sgi())
3852- ACCESS_ONCE(abort_flag) = true;
3853+ ACCESS_ONCE_RW(abort_flag) = true;
3854
3855 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3856
3857diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3858index 2dea8b5..6499da2 100644
3859--- a/arch/arm/mach-ux500/setup.h
3860+++ b/arch/arm/mach-ux500/setup.h
3861@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3862 .type = MT_DEVICE, \
3863 }
3864
3865-#define __MEM_DEV_DESC(x, sz) { \
3866- .virtual = IO_ADDRESS(x), \
3867- .pfn = __phys_to_pfn(x), \
3868- .length = sz, \
3869- .type = MT_MEMORY_RWX, \
3870-}
3871-
3872 extern struct smp_operations ux500_smp_ops;
3873 extern void ux500_cpu_die(unsigned int cpu);
3874
3875diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3876index ae69809..2665202 100644
3877--- a/arch/arm/mm/Kconfig
3878+++ b/arch/arm/mm/Kconfig
3879@@ -446,6 +446,7 @@ config CPU_32v5
3880
3881 config CPU_32v6
3882 bool
3883+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3884 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3885
3886 config CPU_32v6K
3887@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3888
3889 config CPU_USE_DOMAINS
3890 bool
3891+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3892 help
3893 This option enables or disables the use of domain switching
3894 via the set_fs() function.
3895@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
3896 config KUSER_HELPERS
3897 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3898 default y
3899+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3900 help
3901 Warning: disabling this option may break user programs.
3902
3903@@ -811,7 +814,7 @@ config KUSER_HELPERS
3904 See Documentation/arm/kernel_user_helpers.txt for details.
3905
3906 However, the fixed address nature of these helpers can be used
3907- by ROP (return orientated programming) authors when creating
3908+ by ROP (Return Oriented Programming) authors when creating
3909 exploits.
3910
3911 If all of the binaries and libraries which run on your platform
3912diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3913index 83792f4..c25d36b 100644
3914--- a/arch/arm/mm/alignment.c
3915+++ b/arch/arm/mm/alignment.c
3916@@ -216,10 +216,12 @@ union offset_union {
3917 #define __get16_unaligned_check(ins,val,addr) \
3918 do { \
3919 unsigned int err = 0, v, a = addr; \
3920+ pax_open_userland(); \
3921 __get8_unaligned_check(ins,v,a,err); \
3922 val = v << ((BE) ? 8 : 0); \
3923 __get8_unaligned_check(ins,v,a,err); \
3924 val |= v << ((BE) ? 0 : 8); \
3925+ pax_close_userland(); \
3926 if (err) \
3927 goto fault; \
3928 } while (0)
3929@@ -233,6 +235,7 @@ union offset_union {
3930 #define __get32_unaligned_check(ins,val,addr) \
3931 do { \
3932 unsigned int err = 0, v, a = addr; \
3933+ pax_open_userland(); \
3934 __get8_unaligned_check(ins,v,a,err); \
3935 val = v << ((BE) ? 24 : 0); \
3936 __get8_unaligned_check(ins,v,a,err); \
3937@@ -241,6 +244,7 @@ union offset_union {
3938 val |= v << ((BE) ? 8 : 16); \
3939 __get8_unaligned_check(ins,v,a,err); \
3940 val |= v << ((BE) ? 0 : 24); \
3941+ pax_close_userland(); \
3942 if (err) \
3943 goto fault; \
3944 } while (0)
3945@@ -254,6 +258,7 @@ union offset_union {
3946 #define __put16_unaligned_check(ins,val,addr) \
3947 do { \
3948 unsigned int err = 0, v = val, a = addr; \
3949+ pax_open_userland(); \
3950 __asm__( FIRST_BYTE_16 \
3951 ARM( "1: "ins" %1, [%2], #1\n" ) \
3952 THUMB( "1: "ins" %1, [%2]\n" ) \
3953@@ -273,6 +278,7 @@ union offset_union {
3954 " .popsection\n" \
3955 : "=r" (err), "=&r" (v), "=&r" (a) \
3956 : "0" (err), "1" (v), "2" (a)); \
3957+ pax_close_userland(); \
3958 if (err) \
3959 goto fault; \
3960 } while (0)
3961@@ -286,6 +292,7 @@ union offset_union {
3962 #define __put32_unaligned_check(ins,val,addr) \
3963 do { \
3964 unsigned int err = 0, v = val, a = addr; \
3965+ pax_open_userland(); \
3966 __asm__( FIRST_BYTE_32 \
3967 ARM( "1: "ins" %1, [%2], #1\n" ) \
3968 THUMB( "1: "ins" %1, [%2]\n" ) \
3969@@ -315,6 +322,7 @@ union offset_union {
3970 " .popsection\n" \
3971 : "=r" (err), "=&r" (v), "=&r" (a) \
3972 : "0" (err), "1" (v), "2" (a)); \
3973+ pax_close_userland(); \
3974 if (err) \
3975 goto fault; \
3976 } while (0)
3977diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3978index 5f2c988..221412d 100644
3979--- a/arch/arm/mm/cache-l2x0.c
3980+++ b/arch/arm/mm/cache-l2x0.c
3981@@ -41,7 +41,7 @@ struct l2c_init_data {
3982 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3983 void (*save)(void __iomem *);
3984 struct outer_cache_fns outer_cache;
3985-};
3986+} __do_const;
3987
3988 #define CACHE_LINE_SIZE 32
3989
3990diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3991index 6eb97b3..ac509f6 100644
3992--- a/arch/arm/mm/context.c
3993+++ b/arch/arm/mm/context.c
3994@@ -43,7 +43,7 @@
3995 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3996
3997 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3998-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3999+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4000 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4001
4002 static DEFINE_PER_CPU(atomic64_t, active_asids);
4003@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4004 {
4005 static u32 cur_idx = 1;
4006 u64 asid = atomic64_read(&mm->context.id);
4007- u64 generation = atomic64_read(&asid_generation);
4008+ u64 generation = atomic64_read_unchecked(&asid_generation);
4009
4010 if (asid != 0 && is_reserved_asid(asid)) {
4011 /*
4012@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4013 */
4014 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4015 if (asid == NUM_USER_ASIDS) {
4016- generation = atomic64_add_return(ASID_FIRST_VERSION,
4017+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4018 &asid_generation);
4019 flush_context(cpu);
4020 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4021@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4022 cpu_set_reserved_ttbr0();
4023
4024 asid = atomic64_read(&mm->context.id);
4025- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4026+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4027 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4028 goto switch_mm_fastpath;
4029
4030 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4031 /* Check that our ASID belongs to the current generation. */
4032 asid = atomic64_read(&mm->context.id);
4033- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4034+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4035 asid = new_context(mm, cpu);
4036 atomic64_set(&mm->context.id, asid);
4037 }
4038diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4039index eb8830a..e8ff52e 100644
4040--- a/arch/arm/mm/fault.c
4041+++ b/arch/arm/mm/fault.c
4042@@ -25,6 +25,7 @@
4043 #include <asm/system_misc.h>
4044 #include <asm/system_info.h>
4045 #include <asm/tlbflush.h>
4046+#include <asm/sections.h>
4047
4048 #include "fault.h"
4049
4050@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4051 if (fixup_exception(regs))
4052 return;
4053
4054+#ifdef CONFIG_PAX_MEMORY_UDEREF
4055+ if (addr < TASK_SIZE) {
4056+ if (current->signal->curr_ip)
4057+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4058+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4059+ else
4060+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4061+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4062+ }
4063+#endif
4064+
4065+#ifdef CONFIG_PAX_KERNEXEC
4066+ if ((fsr & FSR_WRITE) &&
4067+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4068+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4069+ {
4070+ if (current->signal->curr_ip)
4071+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4072+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4073+ else
4074+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4075+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4076+ }
4077+#endif
4078+
4079 /*
4080 * No handler, we'll have to terminate things with extreme prejudice.
4081 */
4082@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4083 }
4084 #endif
4085
4086+#ifdef CONFIG_PAX_PAGEEXEC
4087+ if (fsr & FSR_LNX_PF) {
4088+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4089+ do_group_exit(SIGKILL);
4090+ }
4091+#endif
4092+
4093 tsk->thread.address = addr;
4094 tsk->thread.error_code = fsr;
4095 tsk->thread.trap_no = 14;
4096@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4097 }
4098 #endif /* CONFIG_MMU */
4099
4100+#ifdef CONFIG_PAX_PAGEEXEC
4101+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4102+{
4103+ long i;
4104+
4105+ printk(KERN_ERR "PAX: bytes at PC: ");
4106+ for (i = 0; i < 20; i++) {
4107+ unsigned char c;
4108+ if (get_user(c, (__force unsigned char __user *)pc+i))
4109+ printk(KERN_CONT "?? ");
4110+ else
4111+ printk(KERN_CONT "%02x ", c);
4112+ }
4113+ printk("\n");
4114+
4115+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4116+ for (i = -1; i < 20; i++) {
4117+ unsigned long c;
4118+ if (get_user(c, (__force unsigned long __user *)sp+i))
4119+ printk(KERN_CONT "???????? ");
4120+ else
4121+ printk(KERN_CONT "%08lx ", c);
4122+ }
4123+ printk("\n");
4124+}
4125+#endif
4126+
4127 /*
4128 * First Level Translation Fault Handler
4129 *
4130@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4131 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4132 struct siginfo info;
4133
4134+#ifdef CONFIG_PAX_MEMORY_UDEREF
4135+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4136+ if (current->signal->curr_ip)
4137+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4138+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4139+ else
4140+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4141+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4142+ goto die;
4143+ }
4144+#endif
4145+
4146 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4147 return;
4148
4149+die:
4150 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4151 inf->name, fsr, addr);
4152
4153@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4154 ifsr_info[nr].name = name;
4155 }
4156
4157+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4158+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4159+
4160 asmlinkage void __exception
4161 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4162 {
4163 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4164 struct siginfo info;
4165+ unsigned long pc = instruction_pointer(regs);
4166+
4167+ if (user_mode(regs)) {
4168+ unsigned long sigpage = current->mm->context.sigpage;
4169+
4170+ if (sigpage <= pc && pc < sigpage + 7*4) {
4171+ if (pc < sigpage + 3*4)
4172+ sys_sigreturn(regs);
4173+ else
4174+ sys_rt_sigreturn(regs);
4175+ return;
4176+ }
4177+ if (pc == 0xffff0f60UL) {
4178+ /*
4179+ * PaX: __kuser_cmpxchg64 emulation
4180+ */
4181+ // TODO
4182+ //regs->ARM_pc = regs->ARM_lr;
4183+ //return;
4184+ }
4185+ if (pc == 0xffff0fa0UL) {
4186+ /*
4187+ * PaX: __kuser_memory_barrier emulation
4188+ */
4189+ // dmb(); implied by the exception
4190+ regs->ARM_pc = regs->ARM_lr;
4191+ return;
4192+ }
4193+ if (pc == 0xffff0fc0UL) {
4194+ /*
4195+ * PaX: __kuser_cmpxchg emulation
4196+ */
4197+ // TODO
4198+ //long new;
4199+ //int op;
4200+
4201+ //op = FUTEX_OP_SET << 28;
4202+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4203+ //regs->ARM_r0 = old != new;
4204+ //regs->ARM_pc = regs->ARM_lr;
4205+ //return;
4206+ }
4207+ if (pc == 0xffff0fe0UL) {
4208+ /*
4209+ * PaX: __kuser_get_tls emulation
4210+ */
4211+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4212+ regs->ARM_pc = regs->ARM_lr;
4213+ return;
4214+ }
4215+ }
4216+
4217+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4218+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4219+ if (current->signal->curr_ip)
4220+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4221+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4222+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4223+ else
4224+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4225+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4226+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4227+ goto die;
4228+ }
4229+#endif
4230+
4231+#ifdef CONFIG_PAX_REFCOUNT
4232+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4233+#ifdef CONFIG_THUMB2_KERNEL
4234+ unsigned short bkpt;
4235+
4236+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4237+#else
4238+ unsigned int bkpt;
4239+
4240+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4241+#endif
4242+ current->thread.error_code = ifsr;
4243+ current->thread.trap_no = 0;
4244+ pax_report_refcount_overflow(regs);
4245+ fixup_exception(regs);
4246+ return;
4247+ }
4248+ }
4249+#endif
4250
4251 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4252 return;
4253
4254+die:
4255 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4256 inf->name, ifsr, addr);
4257
4258diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4259index cf08bdf..772656c 100644
4260--- a/arch/arm/mm/fault.h
4261+++ b/arch/arm/mm/fault.h
4262@@ -3,6 +3,7 @@
4263
4264 /*
4265 * Fault status register encodings. We steal bit 31 for our own purposes.
4266+ * Set when the FSR value is from an instruction fault.
4267 */
4268 #define FSR_LNX_PF (1 << 31)
4269 #define FSR_WRITE (1 << 11)
4270@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4271 }
4272 #endif
4273
4274+/* valid for LPAE and !LPAE */
4275+static inline int is_xn_fault(unsigned int fsr)
4276+{
4277+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4278+}
4279+
4280+static inline int is_domain_fault(unsigned int fsr)
4281+{
4282+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4283+}
4284+
4285 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4286 unsigned long search_exception_table(unsigned long addr);
4287
4288diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4289index 659c75d..6f8c029 100644
4290--- a/arch/arm/mm/init.c
4291+++ b/arch/arm/mm/init.c
4292@@ -31,6 +31,8 @@
4293 #include <asm/setup.h>
4294 #include <asm/tlb.h>
4295 #include <asm/fixmap.h>
4296+#include <asm/system_info.h>
4297+#include <asm/cp15.h>
4298
4299 #include <asm/mach/arch.h>
4300 #include <asm/mach/map.h>
4301@@ -619,7 +621,46 @@ void free_initmem(void)
4302 {
4303 #ifdef CONFIG_HAVE_TCM
4304 extern char __tcm_start, __tcm_end;
4305+#endif
4306
4307+#ifdef CONFIG_PAX_KERNEXEC
4308+ unsigned long addr;
4309+ pgd_t *pgd;
4310+ pud_t *pud;
4311+ pmd_t *pmd;
4312+ int cpu_arch = cpu_architecture();
4313+ unsigned int cr = get_cr();
4314+
4315+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4316+ /* make pages tables, etc before .text NX */
4317+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4318+ pgd = pgd_offset_k(addr);
4319+ pud = pud_offset(pgd, addr);
4320+ pmd = pmd_offset(pud, addr);
4321+ __section_update(pmd, addr, PMD_SECT_XN);
4322+ }
4323+ /* make init NX */
4324+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4325+ pgd = pgd_offset_k(addr);
4326+ pud = pud_offset(pgd, addr);
4327+ pmd = pmd_offset(pud, addr);
4328+ __section_update(pmd, addr, PMD_SECT_XN);
4329+ }
4330+ /* make kernel code/rodata RX */
4331+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4332+ pgd = pgd_offset_k(addr);
4333+ pud = pud_offset(pgd, addr);
4334+ pmd = pmd_offset(pud, addr);
4335+#ifdef CONFIG_ARM_LPAE
4336+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4337+#else
4338+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4339+#endif
4340+ }
4341+ }
4342+#endif
4343+
4344+#ifdef CONFIG_HAVE_TCM
4345 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4346 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4347 #endif
4348diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4349index d1e5ad7..84dcbf2 100644
4350--- a/arch/arm/mm/ioremap.c
4351+++ b/arch/arm/mm/ioremap.c
4352@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4353 unsigned int mtype;
4354
4355 if (cached)
4356- mtype = MT_MEMORY_RWX;
4357+ mtype = MT_MEMORY_RX;
4358 else
4359- mtype = MT_MEMORY_RWX_NONCACHED;
4360+ mtype = MT_MEMORY_RX_NONCACHED;
4361
4362 return __arm_ioremap_caller(phys_addr, size, mtype,
4363 __builtin_return_address(0));
4364diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4365index 5e85ed3..b10a7ed 100644
4366--- a/arch/arm/mm/mmap.c
4367+++ b/arch/arm/mm/mmap.c
4368@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4369 struct vm_area_struct *vma;
4370 int do_align = 0;
4371 int aliasing = cache_is_vipt_aliasing();
4372+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4373 struct vm_unmapped_area_info info;
4374
4375 /*
4376@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4377 if (len > TASK_SIZE)
4378 return -ENOMEM;
4379
4380+#ifdef CONFIG_PAX_RANDMMAP
4381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4382+#endif
4383+
4384 if (addr) {
4385 if (do_align)
4386 addr = COLOUR_ALIGN(addr, pgoff);
4387@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4388 addr = PAGE_ALIGN(addr);
4389
4390 vma = find_vma(mm, addr);
4391- if (TASK_SIZE - len >= addr &&
4392- (!vma || addr + len <= vma->vm_start))
4393+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4394 return addr;
4395 }
4396
4397@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4398 info.high_limit = TASK_SIZE;
4399 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4400 info.align_offset = pgoff << PAGE_SHIFT;
4401+ info.threadstack_offset = offset;
4402 return vm_unmapped_area(&info);
4403 }
4404
4405@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4406 unsigned long addr = addr0;
4407 int do_align = 0;
4408 int aliasing = cache_is_vipt_aliasing();
4409+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4410 struct vm_unmapped_area_info info;
4411
4412 /*
4413@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4414 return addr;
4415 }
4416
4417+#ifdef CONFIG_PAX_RANDMMAP
4418+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4419+#endif
4420+
4421 /* requesting a specific address */
4422 if (addr) {
4423 if (do_align)
4424@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4425 else
4426 addr = PAGE_ALIGN(addr);
4427 vma = find_vma(mm, addr);
4428- if (TASK_SIZE - len >= addr &&
4429- (!vma || addr + len <= vma->vm_start))
4430+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4431 return addr;
4432 }
4433
4434@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4435 info.high_limit = mm->mmap_base;
4436 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4437 info.align_offset = pgoff << PAGE_SHIFT;
4438+ info.threadstack_offset = offset;
4439 addr = vm_unmapped_area(&info);
4440
4441 /*
4442@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4443 {
4444 unsigned long random_factor = 0UL;
4445
4446+#ifdef CONFIG_PAX_RANDMMAP
4447+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4448+#endif
4449+
4450 /* 8 bits of randomness in 20 address space bits */
4451 if ((current->flags & PF_RANDOMIZE) &&
4452 !(current->personality & ADDR_NO_RANDOMIZE))
4453@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4454
4455 if (mmap_is_legacy()) {
4456 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4457+
4458+#ifdef CONFIG_PAX_RANDMMAP
4459+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4460+ mm->mmap_base += mm->delta_mmap;
4461+#endif
4462+
4463 mm->get_unmapped_area = arch_get_unmapped_area;
4464 } else {
4465 mm->mmap_base = mmap_base(random_factor);
4466+
4467+#ifdef CONFIG_PAX_RANDMMAP
4468+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4469+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4470+#endif
4471+
4472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4473 }
4474 }
4475diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4476index 8348ed6..b73a807 100644
4477--- a/arch/arm/mm/mmu.c
4478+++ b/arch/arm/mm/mmu.c
4479@@ -40,6 +40,22 @@
4480 #include "mm.h"
4481 #include "tcm.h"
4482
4483+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4484+void modify_domain(unsigned int dom, unsigned int type)
4485+{
4486+ struct thread_info *thread = current_thread_info();
4487+ unsigned int domain = thread->cpu_domain;
4488+ /*
4489+ * DOMAIN_MANAGER might be defined to some other value,
4490+ * use the arch-defined constant
4491+ */
4492+ domain &= ~domain_val(dom, 3);
4493+ thread->cpu_domain = domain | domain_val(dom, type);
4494+ set_domain(thread->cpu_domain);
4495+}
4496+EXPORT_SYMBOL(modify_domain);
4497+#endif
4498+
4499 /*
4500 * empty_zero_page is a special page that is used for
4501 * zero-initialized data and COW.
4502@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4503 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4504 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4505
4506-static struct mem_type mem_types[] = {
4507+#ifdef CONFIG_PAX_KERNEXEC
4508+#define L_PTE_KERNEXEC L_PTE_RDONLY
4509+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4510+#else
4511+#define L_PTE_KERNEXEC L_PTE_DIRTY
4512+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4513+#endif
4514+
4515+static struct mem_type mem_types[] __read_only = {
4516 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4517 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4518 L_PTE_SHARED,
4519@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4520 .prot_sect = PROT_SECT_DEVICE,
4521 .domain = DOMAIN_IO,
4522 },
4523- [MT_UNCACHED] = {
4524+ [MT_UNCACHED_RW] = {
4525 .prot_pte = PROT_PTE_DEVICE,
4526 .prot_l1 = PMD_TYPE_TABLE,
4527 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4528 .domain = DOMAIN_IO,
4529 },
4530- [MT_CACHECLEAN] = {
4531- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4532+ [MT_CACHECLEAN_RO] = {
4533+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4534 .domain = DOMAIN_KERNEL,
4535 },
4536 #ifndef CONFIG_ARM_LPAE
4537- [MT_MINICLEAN] = {
4538- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4539+ [MT_MINICLEAN_RO] = {
4540+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4541 .domain = DOMAIN_KERNEL,
4542 },
4543 #endif
4544@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4545 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4546 L_PTE_RDONLY,
4547 .prot_l1 = PMD_TYPE_TABLE,
4548- .domain = DOMAIN_USER,
4549+ .domain = DOMAIN_VECTORS,
4550 },
4551 [MT_HIGH_VECTORS] = {
4552 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4553 L_PTE_USER | L_PTE_RDONLY,
4554 .prot_l1 = PMD_TYPE_TABLE,
4555- .domain = DOMAIN_USER,
4556+ .domain = DOMAIN_VECTORS,
4557 },
4558- [MT_MEMORY_RWX] = {
4559+ [__MT_MEMORY_RWX] = {
4560 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4561 .prot_l1 = PMD_TYPE_TABLE,
4562 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4563@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4564 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4565 .domain = DOMAIN_KERNEL,
4566 },
4567- [MT_ROM] = {
4568- .prot_sect = PMD_TYPE_SECT,
4569+ [MT_MEMORY_RX] = {
4570+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4571+ .prot_l1 = PMD_TYPE_TABLE,
4572+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4573+ .domain = DOMAIN_KERNEL,
4574+ },
4575+ [MT_ROM_RX] = {
4576+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4577 .domain = DOMAIN_KERNEL,
4578 },
4579- [MT_MEMORY_RWX_NONCACHED] = {
4580+ [MT_MEMORY_RW_NONCACHED] = {
4581 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4582 L_PTE_MT_BUFFERABLE,
4583 .prot_l1 = PMD_TYPE_TABLE,
4584 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4585 .domain = DOMAIN_KERNEL,
4586 },
4587+ [MT_MEMORY_RX_NONCACHED] = {
4588+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4589+ L_PTE_MT_BUFFERABLE,
4590+ .prot_l1 = PMD_TYPE_TABLE,
4591+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4592+ .domain = DOMAIN_KERNEL,
4593+ },
4594 [MT_MEMORY_RW_DTCM] = {
4595 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4596 L_PTE_XN,
4597@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4598 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4599 .domain = DOMAIN_KERNEL,
4600 },
4601- [MT_MEMORY_RWX_ITCM] = {
4602- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4603+ [MT_MEMORY_RX_ITCM] = {
4604+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4605 .prot_l1 = PMD_TYPE_TABLE,
4606+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4607 .domain = DOMAIN_KERNEL,
4608 },
4609 [MT_MEMORY_RW_SO] = {
4610@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
4611 * Mark cache clean areas and XIP ROM read only
4612 * from SVC mode and no access from userspace.
4613 */
4614- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4615- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4616- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4617+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4618+#ifdef CONFIG_PAX_KERNEXEC
4619+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4620+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4621+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622+#endif
4623+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4624+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625 #endif
4626
4627 /*
4628@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
4629 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4630 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4631 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4632- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4633- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4634+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4635+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4636 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4637 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4638+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4639+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4640 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4641- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4642- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4643+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4644+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4645+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4646+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4647 }
4648 }
4649
4650@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
4651 if (cpu_arch >= CPU_ARCH_ARMv6) {
4652 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4653 /* Non-cacheable Normal is XCB = 001 */
4654- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4655+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4656+ PMD_SECT_BUFFERED;
4657+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4658 PMD_SECT_BUFFERED;
4659 } else {
4660 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4661- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4662+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4663+ PMD_SECT_TEX(1);
4664+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4665 PMD_SECT_TEX(1);
4666 }
4667 } else {
4668- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4669+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4670+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4671 }
4672
4673 #ifdef CONFIG_ARM_LPAE
4674@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
4675 vecs_pgprot |= PTE_EXT_AF;
4676 #endif
4677
4678+ user_pgprot |= __supported_pte_mask;
4679+
4680 for (i = 0; i < 16; i++) {
4681 pteval_t v = pgprot_val(protection_map[i]);
4682 protection_map[i] = __pgprot(v | user_pgprot);
4683@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
4684
4685 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4686 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4687- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4688- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4689+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4690+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4691 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4692 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4693+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4694+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4695 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4696- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4697- mem_types[MT_ROM].prot_sect |= cp->pmd;
4698+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4699+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4700+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4701
4702 switch (cp->pmd) {
4703 case PMD_SECT_WT:
4704- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4705+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4706 break;
4707 case PMD_SECT_WB:
4708 case PMD_SECT_WBWA:
4709- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4710+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4711 break;
4712 }
4713 pr_info("Memory policy: %sData cache %s\n",
4714@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
4715 return;
4716 }
4717
4718- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4719+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4720 md->virtual >= PAGE_OFFSET &&
4721 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4722 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
4723@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4724 * called function. This means you can't use any function or debugging
4725 * method which may touch any device, otherwise the kernel _will_ crash.
4726 */
4727+
4728+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4729+
4730 static void __init devicemaps_init(const struct machine_desc *mdesc)
4731 {
4732 struct map_desc map;
4733 unsigned long addr;
4734- void *vectors;
4735
4736- /*
4737- * Allocate the vector page early.
4738- */
4739- vectors = early_alloc(PAGE_SIZE * 2);
4740-
4741- early_trap_init(vectors);
4742+ early_trap_init(&vectors);
4743
4744 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4745 pmd_clear(pmd_off_k(addr));
4746@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4747 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4748 map.virtual = MODULES_VADDR;
4749 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4750- map.type = MT_ROM;
4751+ map.type = MT_ROM_RX;
4752 create_mapping(&map);
4753 #endif
4754
4755@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4756 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4757 map.virtual = FLUSH_BASE;
4758 map.length = SZ_1M;
4759- map.type = MT_CACHECLEAN;
4760+ map.type = MT_CACHECLEAN_RO;
4761 create_mapping(&map);
4762 #endif
4763 #ifdef FLUSH_BASE_MINICACHE
4764 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4765 map.virtual = FLUSH_BASE_MINICACHE;
4766 map.length = SZ_1M;
4767- map.type = MT_MINICLEAN;
4768+ map.type = MT_MINICLEAN_RO;
4769 create_mapping(&map);
4770 #endif
4771
4772@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4773 * location (0xffff0000). If we aren't using high-vectors, also
4774 * create a mapping at the low-vectors virtual address.
4775 */
4776- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4777+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4778 map.virtual = 0xffff0000;
4779 map.length = PAGE_SIZE;
4780 #ifdef CONFIG_KUSER_HELPERS
4781@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4782 static void __init map_lowmem(void)
4783 {
4784 struct memblock_region *reg;
4785+#ifndef CONFIG_PAX_KERNEXEC
4786 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4787 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4788+#endif
4789
4790 /* Map all the lowmem memory banks. */
4791 for_each_memblock(memory, reg) {
4792@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4793 if (start >= end)
4794 break;
4795
4796+#ifdef CONFIG_PAX_KERNEXEC
4797+ map.pfn = __phys_to_pfn(start);
4798+ map.virtual = __phys_to_virt(start);
4799+ map.length = end - start;
4800+
4801+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4802+ struct map_desc kernel;
4803+ struct map_desc initmap;
4804+
4805+ /* when freeing initmem we will make this RW */
4806+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4807+ initmap.virtual = (unsigned long)__init_begin;
4808+ initmap.length = _sdata - __init_begin;
4809+ initmap.type = __MT_MEMORY_RWX;
4810+ create_mapping(&initmap);
4811+
4812+ /* when freeing initmem we will make this RX */
4813+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4814+ kernel.virtual = (unsigned long)_stext;
4815+ kernel.length = __init_begin - _stext;
4816+ kernel.type = __MT_MEMORY_RWX;
4817+ create_mapping(&kernel);
4818+
4819+ if (map.virtual < (unsigned long)_stext) {
4820+ map.length = (unsigned long)_stext - map.virtual;
4821+ map.type = __MT_MEMORY_RWX;
4822+ create_mapping(&map);
4823+ }
4824+
4825+ map.pfn = __phys_to_pfn(__pa(_sdata));
4826+ map.virtual = (unsigned long)_sdata;
4827+ map.length = end - __pa(_sdata);
4828+ }
4829+
4830+ map.type = MT_MEMORY_RW;
4831+ create_mapping(&map);
4832+#else
4833 if (end < kernel_x_start || start >= kernel_x_end) {
4834 map.pfn = __phys_to_pfn(start);
4835 map.virtual = __phys_to_virt(start);
4836 map.length = end - start;
4837- map.type = MT_MEMORY_RWX;
4838+ map.type = __MT_MEMORY_RWX;
4839
4840 create_mapping(&map);
4841 } else {
4842@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
4843 map.pfn = __phys_to_pfn(kernel_x_start);
4844 map.virtual = __phys_to_virt(kernel_x_start);
4845 map.length = kernel_x_end - kernel_x_start;
4846- map.type = MT_MEMORY_RWX;
4847+ map.type = __MT_MEMORY_RWX;
4848
4849 create_mapping(&map);
4850
4851@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
4852 create_mapping(&map);
4853 }
4854 }
4855+#endif
4856 }
4857 }
4858
4859diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4860index a37b989..5c9ae75 100644
4861--- a/arch/arm/net/bpf_jit_32.c
4862+++ b/arch/arm/net/bpf_jit_32.c
4863@@ -71,7 +71,11 @@ struct jit_ctx {
4864 #endif
4865 };
4866
4867+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4868+int bpf_jit_enable __read_only;
4869+#else
4870 int bpf_jit_enable __read_mostly;
4871+#endif
4872
4873 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4874 {
4875@@ -930,5 +934,6 @@ void bpf_jit_free(struct bpf_prog *fp)
4876 {
4877 if (fp->jited)
4878 module_free(NULL, fp->bpf_func);
4879- kfree(fp);
4880+
4881+ bpf_prog_unlock_free(fp);
4882 }
4883diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4884index 5b217f4..c23f40e 100644
4885--- a/arch/arm/plat-iop/setup.c
4886+++ b/arch/arm/plat-iop/setup.c
4887@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4888 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4889 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4890 .length = IOP3XX_PERIPHERAL_SIZE,
4891- .type = MT_UNCACHED,
4892+ .type = MT_UNCACHED_RW,
4893 },
4894 };
4895
4896diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4897index a5bc92d..0bb4730 100644
4898--- a/arch/arm/plat-omap/sram.c
4899+++ b/arch/arm/plat-omap/sram.c
4900@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4901 * Looks like we need to preserve some bootloader code at the
4902 * beginning of SRAM for jumping to flash for reboot to work...
4903 */
4904+ pax_open_kernel();
4905 memset_io(omap_sram_base + omap_sram_skip, 0,
4906 omap_sram_size - omap_sram_skip);
4907+ pax_close_kernel();
4908 }
4909diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4910index ce6d763..cfea917 100644
4911--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4912+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4913@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4914 int (*started)(unsigned ch);
4915 int (*flush)(unsigned ch);
4916 int (*stop)(unsigned ch);
4917-};
4918+} __no_const;
4919
4920 extern void *samsung_dmadev_get_ops(void);
4921 extern void *s3c_dma_get_ops(void);
4922diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4923index 6389d60..b5d3bdd 100644
4924--- a/arch/arm64/include/asm/barrier.h
4925+++ b/arch/arm64/include/asm/barrier.h
4926@@ -41,7 +41,7 @@
4927 do { \
4928 compiletime_assert_atomic_type(*p); \
4929 barrier(); \
4930- ACCESS_ONCE(*p) = (v); \
4931+ ACCESS_ONCE_RW(*p) = (v); \
4932 } while (0)
4933
4934 #define smp_load_acquire(p) \
4935diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4936index 3bf8f4e..5dd5491 100644
4937--- a/arch/arm64/include/asm/uaccess.h
4938+++ b/arch/arm64/include/asm/uaccess.h
4939@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4940 flag; \
4941 })
4942
4943+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4944 #define access_ok(type, addr, size) __range_ok(addr, size)
4945 #define user_addr_max get_fs
4946
4947diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4948index c3a58a1..78fbf54 100644
4949--- a/arch/avr32/include/asm/cache.h
4950+++ b/arch/avr32/include/asm/cache.h
4951@@ -1,8 +1,10 @@
4952 #ifndef __ASM_AVR32_CACHE_H
4953 #define __ASM_AVR32_CACHE_H
4954
4955+#include <linux/const.h>
4956+
4957 #define L1_CACHE_SHIFT 5
4958-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4959+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4960
4961 /*
4962 * Memory returned by kmalloc() may be used for DMA, so we must make
4963diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4964index d232888..87c8df1 100644
4965--- a/arch/avr32/include/asm/elf.h
4966+++ b/arch/avr32/include/asm/elf.h
4967@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4968 the loader. We need to make sure that it is out of the way of the program
4969 that it will "exec", and that there is sufficient room for the brk. */
4970
4971-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4972+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4973
4974+#ifdef CONFIG_PAX_ASLR
4975+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4976+
4977+#define PAX_DELTA_MMAP_LEN 15
4978+#define PAX_DELTA_STACK_LEN 15
4979+#endif
4980
4981 /* This yields a mask that user programs can use to figure out what
4982 instruction set this CPU supports. This could be done in user space,
4983diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4984index 479330b..53717a8 100644
4985--- a/arch/avr32/include/asm/kmap_types.h
4986+++ b/arch/avr32/include/asm/kmap_types.h
4987@@ -2,9 +2,9 @@
4988 #define __ASM_AVR32_KMAP_TYPES_H
4989
4990 #ifdef CONFIG_DEBUG_HIGHMEM
4991-# define KM_TYPE_NR 29
4992+# define KM_TYPE_NR 30
4993 #else
4994-# define KM_TYPE_NR 14
4995+# define KM_TYPE_NR 15
4996 #endif
4997
4998 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4999diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5000index 0eca933..eb78c7b 100644
5001--- a/arch/avr32/mm/fault.c
5002+++ b/arch/avr32/mm/fault.c
5003@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5004
5005 int exception_trace = 1;
5006
5007+#ifdef CONFIG_PAX_PAGEEXEC
5008+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5009+{
5010+ unsigned long i;
5011+
5012+ printk(KERN_ERR "PAX: bytes at PC: ");
5013+ for (i = 0; i < 20; i++) {
5014+ unsigned char c;
5015+ if (get_user(c, (unsigned char *)pc+i))
5016+ printk(KERN_CONT "???????? ");
5017+ else
5018+ printk(KERN_CONT "%02x ", c);
5019+ }
5020+ printk("\n");
5021+}
5022+#endif
5023+
5024 /*
5025 * This routine handles page faults. It determines the address and the
5026 * problem, and then passes it off to one of the appropriate routines.
5027@@ -176,6 +193,16 @@ bad_area:
5028 up_read(&mm->mmap_sem);
5029
5030 if (user_mode(regs)) {
5031+
5032+#ifdef CONFIG_PAX_PAGEEXEC
5033+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5034+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5035+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5036+ do_group_exit(SIGKILL);
5037+ }
5038+ }
5039+#endif
5040+
5041 if (exception_trace && printk_ratelimit())
5042 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5043 "sp %08lx ecr %lu\n",
5044diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5045index 568885a..f8008df 100644
5046--- a/arch/blackfin/include/asm/cache.h
5047+++ b/arch/blackfin/include/asm/cache.h
5048@@ -7,6 +7,7 @@
5049 #ifndef __ARCH_BLACKFIN_CACHE_H
5050 #define __ARCH_BLACKFIN_CACHE_H
5051
5052+#include <linux/const.h>
5053 #include <linux/linkage.h> /* for asmlinkage */
5054
5055 /*
5056@@ -14,7 +15,7 @@
5057 * Blackfin loads 32 bytes for cache
5058 */
5059 #define L1_CACHE_SHIFT 5
5060-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5061+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5062 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5063
5064 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5065diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5066index aea2718..3639a60 100644
5067--- a/arch/cris/include/arch-v10/arch/cache.h
5068+++ b/arch/cris/include/arch-v10/arch/cache.h
5069@@ -1,8 +1,9 @@
5070 #ifndef _ASM_ARCH_CACHE_H
5071 #define _ASM_ARCH_CACHE_H
5072
5073+#include <linux/const.h>
5074 /* Etrax 100LX have 32-byte cache-lines. */
5075-#define L1_CACHE_BYTES 32
5076 #define L1_CACHE_SHIFT 5
5077+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5078
5079 #endif /* _ASM_ARCH_CACHE_H */
5080diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5081index 7caf25d..ee65ac5 100644
5082--- a/arch/cris/include/arch-v32/arch/cache.h
5083+++ b/arch/cris/include/arch-v32/arch/cache.h
5084@@ -1,11 +1,12 @@
5085 #ifndef _ASM_CRIS_ARCH_CACHE_H
5086 #define _ASM_CRIS_ARCH_CACHE_H
5087
5088+#include <linux/const.h>
5089 #include <arch/hwregs/dma.h>
5090
5091 /* A cache-line is 32 bytes. */
5092-#define L1_CACHE_BYTES 32
5093 #define L1_CACHE_SHIFT 5
5094+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5095
5096 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5097
5098diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5099index f6c3a16..cd422a4 100644
5100--- a/arch/frv/include/asm/atomic.h
5101+++ b/arch/frv/include/asm/atomic.h
5102@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5103 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5104 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5105
5106+#define atomic64_read_unchecked(v) atomic64_read(v)
5107+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5108+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5109+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5110+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5111+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5112+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5113+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5114+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5115+
5116 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5117 {
5118 int c, old;
5119diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5120index 2797163..c2a401d 100644
5121--- a/arch/frv/include/asm/cache.h
5122+++ b/arch/frv/include/asm/cache.h
5123@@ -12,10 +12,11 @@
5124 #ifndef __ASM_CACHE_H
5125 #define __ASM_CACHE_H
5126
5127+#include <linux/const.h>
5128
5129 /* bytes per L1 cache line */
5130 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5131-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5132+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5133
5134 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5135 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5136diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5137index 43901f2..0d8b865 100644
5138--- a/arch/frv/include/asm/kmap_types.h
5139+++ b/arch/frv/include/asm/kmap_types.h
5140@@ -2,6 +2,6 @@
5141 #ifndef _ASM_KMAP_TYPES_H
5142 #define _ASM_KMAP_TYPES_H
5143
5144-#define KM_TYPE_NR 17
5145+#define KM_TYPE_NR 18
5146
5147 #endif
5148diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5149index 836f147..4cf23f5 100644
5150--- a/arch/frv/mm/elf-fdpic.c
5151+++ b/arch/frv/mm/elf-fdpic.c
5152@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5153 {
5154 struct vm_area_struct *vma;
5155 struct vm_unmapped_area_info info;
5156+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5157
5158 if (len > TASK_SIZE)
5159 return -ENOMEM;
5160@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5161 if (addr) {
5162 addr = PAGE_ALIGN(addr);
5163 vma = find_vma(current->mm, addr);
5164- if (TASK_SIZE - len >= addr &&
5165- (!vma || addr + len <= vma->vm_start))
5166+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5167 goto success;
5168 }
5169
5170@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5171 info.high_limit = (current->mm->start_stack - 0x00200000);
5172 info.align_mask = 0;
5173 info.align_offset = 0;
5174+ info.threadstack_offset = offset;
5175 addr = vm_unmapped_area(&info);
5176 if (!(addr & ~PAGE_MASK))
5177 goto success;
5178diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5179index 2635117..fa223cb 100644
5180--- a/arch/hexagon/include/asm/cache.h
5181+++ b/arch/hexagon/include/asm/cache.h
5182@@ -21,9 +21,11 @@
5183 #ifndef __ASM_CACHE_H
5184 #define __ASM_CACHE_H
5185
5186+#include <linux/const.h>
5187+
5188 /* Bytes per L1 cache line */
5189-#define L1_CACHE_SHIFT (5)
5190-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5191+#define L1_CACHE_SHIFT 5
5192+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5193
5194 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5195 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5196diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5197index c84c88b..2a6e1ba 100644
5198--- a/arch/ia64/Kconfig
5199+++ b/arch/ia64/Kconfig
5200@@ -549,6 +549,7 @@ source "drivers/sn/Kconfig"
5201 config KEXEC
5202 bool "kexec system call"
5203 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5204+ depends on !GRKERNSEC_KMEM
5205 help
5206 kexec is a system call that implements the ability to shutdown your
5207 current kernel, and to start another kernel. It is like a reboot
5208diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5209index 5441b14..039a446 100644
5210--- a/arch/ia64/Makefile
5211+++ b/arch/ia64/Makefile
5212@@ -99,5 +99,6 @@ endef
5213 archprepare: make_nr_irqs_h FORCE
5214 PHONY += make_nr_irqs_h FORCE
5215
5216+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5217 make_nr_irqs_h: FORCE
5218 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5219diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5220index 0f8bf48..40ea950 100644
5221--- a/arch/ia64/include/asm/atomic.h
5222+++ b/arch/ia64/include/asm/atomic.h
5223@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5224 #define atomic64_inc(v) atomic64_add(1, (v))
5225 #define atomic64_dec(v) atomic64_sub(1, (v))
5226
5227+#define atomic64_read_unchecked(v) atomic64_read(v)
5228+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5229+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5230+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5231+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5232+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5233+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5234+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5235+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5236+
5237 #endif /* _ASM_IA64_ATOMIC_H */
5238diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5239index a48957c..e097b56 100644
5240--- a/arch/ia64/include/asm/barrier.h
5241+++ b/arch/ia64/include/asm/barrier.h
5242@@ -67,7 +67,7 @@
5243 do { \
5244 compiletime_assert_atomic_type(*p); \
5245 barrier(); \
5246- ACCESS_ONCE(*p) = (v); \
5247+ ACCESS_ONCE_RW(*p) = (v); \
5248 } while (0)
5249
5250 #define smp_load_acquire(p) \
5251diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5252index 988254a..e1ee885 100644
5253--- a/arch/ia64/include/asm/cache.h
5254+++ b/arch/ia64/include/asm/cache.h
5255@@ -1,6 +1,7 @@
5256 #ifndef _ASM_IA64_CACHE_H
5257 #define _ASM_IA64_CACHE_H
5258
5259+#include <linux/const.h>
5260
5261 /*
5262 * Copyright (C) 1998-2000 Hewlett-Packard Co
5263@@ -9,7 +10,7 @@
5264
5265 /* Bytes per L1 (data) cache line. */
5266 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5267-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5268+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5269
5270 #ifdef CONFIG_SMP
5271 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5272diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5273index 5a83c5c..4d7f553 100644
5274--- a/arch/ia64/include/asm/elf.h
5275+++ b/arch/ia64/include/asm/elf.h
5276@@ -42,6 +42,13 @@
5277 */
5278 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5279
5280+#ifdef CONFIG_PAX_ASLR
5281+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5282+
5283+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5284+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5285+#endif
5286+
5287 #define PT_IA_64_UNWIND 0x70000001
5288
5289 /* IA-64 relocations: */
5290diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5291index 5767cdf..7462574 100644
5292--- a/arch/ia64/include/asm/pgalloc.h
5293+++ b/arch/ia64/include/asm/pgalloc.h
5294@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5295 pgd_val(*pgd_entry) = __pa(pud);
5296 }
5297
5298+static inline void
5299+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5300+{
5301+ pgd_populate(mm, pgd_entry, pud);
5302+}
5303+
5304 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5305 {
5306 return quicklist_alloc(0, GFP_KERNEL, NULL);
5307@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5308 pud_val(*pud_entry) = __pa(pmd);
5309 }
5310
5311+static inline void
5312+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5313+{
5314+ pud_populate(mm, pud_entry, pmd);
5315+}
5316+
5317 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5318 {
5319 return quicklist_alloc(0, GFP_KERNEL, NULL);
5320diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5321index 7935115..c0eca6a 100644
5322--- a/arch/ia64/include/asm/pgtable.h
5323+++ b/arch/ia64/include/asm/pgtable.h
5324@@ -12,7 +12,7 @@
5325 * David Mosberger-Tang <davidm@hpl.hp.com>
5326 */
5327
5328-
5329+#include <linux/const.h>
5330 #include <asm/mman.h>
5331 #include <asm/page.h>
5332 #include <asm/processor.h>
5333@@ -142,6 +142,17 @@
5334 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5335 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5336 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5337+
5338+#ifdef CONFIG_PAX_PAGEEXEC
5339+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5340+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5341+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5342+#else
5343+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5344+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5345+# define PAGE_COPY_NOEXEC PAGE_COPY
5346+#endif
5347+
5348 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5349 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5350 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5351diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5352index 45698cd..e8e2dbc 100644
5353--- a/arch/ia64/include/asm/spinlock.h
5354+++ b/arch/ia64/include/asm/spinlock.h
5355@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5356 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5357
5358 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5359- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5360+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5361 }
5362
5363 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5364diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5365index 449c8c0..3d4b1e9 100644
5366--- a/arch/ia64/include/asm/uaccess.h
5367+++ b/arch/ia64/include/asm/uaccess.h
5368@@ -70,6 +70,7 @@
5369 && ((segment).seg == KERNEL_DS.seg \
5370 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5371 })
5372+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5373 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5374
5375 /*
5376@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5377 static inline unsigned long
5378 __copy_to_user (void __user *to, const void *from, unsigned long count)
5379 {
5380+ if (count > INT_MAX)
5381+ return count;
5382+
5383+ if (!__builtin_constant_p(count))
5384+ check_object_size(from, count, true);
5385+
5386 return __copy_user(to, (__force void __user *) from, count);
5387 }
5388
5389 static inline unsigned long
5390 __copy_from_user (void *to, const void __user *from, unsigned long count)
5391 {
5392+ if (count > INT_MAX)
5393+ return count;
5394+
5395+ if (!__builtin_constant_p(count))
5396+ check_object_size(to, count, false);
5397+
5398 return __copy_user((__force void __user *) to, from, count);
5399 }
5400
5401@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5402 ({ \
5403 void __user *__cu_to = (to); \
5404 const void *__cu_from = (from); \
5405- long __cu_len = (n); \
5406+ unsigned long __cu_len = (n); \
5407 \
5408- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5409+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5410+ if (!__builtin_constant_p(n)) \
5411+ check_object_size(__cu_from, __cu_len, true); \
5412 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5413+ } \
5414 __cu_len; \
5415 })
5416
5417@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5418 ({ \
5419 void *__cu_to = (to); \
5420 const void __user *__cu_from = (from); \
5421- long __cu_len = (n); \
5422+ unsigned long __cu_len = (n); \
5423 \
5424 __chk_user_ptr(__cu_from); \
5425- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5426+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5427+ if (!__builtin_constant_p(n)) \
5428+ check_object_size(__cu_to, __cu_len, false); \
5429 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5430+ } \
5431 __cu_len; \
5432 })
5433
5434diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5435index 24603be..948052d 100644
5436--- a/arch/ia64/kernel/module.c
5437+++ b/arch/ia64/kernel/module.c
5438@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5439 void
5440 module_free (struct module *mod, void *module_region)
5441 {
5442- if (mod && mod->arch.init_unw_table &&
5443- module_region == mod->module_init) {
5444+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5445 unw_remove_unwind_table(mod->arch.init_unw_table);
5446 mod->arch.init_unw_table = NULL;
5447 }
5448@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5449 }
5450
5451 static inline int
5452+in_init_rx (const struct module *mod, uint64_t addr)
5453+{
5454+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5455+}
5456+
5457+static inline int
5458+in_init_rw (const struct module *mod, uint64_t addr)
5459+{
5460+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5461+}
5462+
5463+static inline int
5464 in_init (const struct module *mod, uint64_t addr)
5465 {
5466- return addr - (uint64_t) mod->module_init < mod->init_size;
5467+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5468+}
5469+
5470+static inline int
5471+in_core_rx (const struct module *mod, uint64_t addr)
5472+{
5473+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5474+}
5475+
5476+static inline int
5477+in_core_rw (const struct module *mod, uint64_t addr)
5478+{
5479+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5480 }
5481
5482 static inline int
5483 in_core (const struct module *mod, uint64_t addr)
5484 {
5485- return addr - (uint64_t) mod->module_core < mod->core_size;
5486+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5487 }
5488
5489 static inline int
5490@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5491 break;
5492
5493 case RV_BDREL:
5494- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5495+ if (in_init_rx(mod, val))
5496+ val -= (uint64_t) mod->module_init_rx;
5497+ else if (in_init_rw(mod, val))
5498+ val -= (uint64_t) mod->module_init_rw;
5499+ else if (in_core_rx(mod, val))
5500+ val -= (uint64_t) mod->module_core_rx;
5501+ else if (in_core_rw(mod, val))
5502+ val -= (uint64_t) mod->module_core_rw;
5503 break;
5504
5505 case RV_LTV:
5506@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5507 * addresses have been selected...
5508 */
5509 uint64_t gp;
5510- if (mod->core_size > MAX_LTOFF)
5511+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5512 /*
5513 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5514 * at the end of the module.
5515 */
5516- gp = mod->core_size - MAX_LTOFF / 2;
5517+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5518 else
5519- gp = mod->core_size / 2;
5520- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5521+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5522+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5523 mod->arch.gp = gp;
5524 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5525 }
5526diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5527index c39c3cd..3c77738 100644
5528--- a/arch/ia64/kernel/palinfo.c
5529+++ b/arch/ia64/kernel/palinfo.c
5530@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5531 return NOTIFY_OK;
5532 }
5533
5534-static struct notifier_block __refdata palinfo_cpu_notifier =
5535+static struct notifier_block palinfo_cpu_notifier =
5536 {
5537 .notifier_call = palinfo_cpu_callback,
5538 .priority = 0,
5539diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5540index 41e33f8..65180b2a 100644
5541--- a/arch/ia64/kernel/sys_ia64.c
5542+++ b/arch/ia64/kernel/sys_ia64.c
5543@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5544 unsigned long align_mask = 0;
5545 struct mm_struct *mm = current->mm;
5546 struct vm_unmapped_area_info info;
5547+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5548
5549 if (len > RGN_MAP_LIMIT)
5550 return -ENOMEM;
5551@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5552 if (REGION_NUMBER(addr) == RGN_HPAGE)
5553 addr = 0;
5554 #endif
5555+
5556+#ifdef CONFIG_PAX_RANDMMAP
5557+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5558+ addr = mm->free_area_cache;
5559+ else
5560+#endif
5561+
5562 if (!addr)
5563 addr = TASK_UNMAPPED_BASE;
5564
5565@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5566 info.high_limit = TASK_SIZE;
5567 info.align_mask = align_mask;
5568 info.align_offset = 0;
5569+ info.threadstack_offset = offset;
5570 return vm_unmapped_area(&info);
5571 }
5572
5573diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5574index 84f8a52..7c76178 100644
5575--- a/arch/ia64/kernel/vmlinux.lds.S
5576+++ b/arch/ia64/kernel/vmlinux.lds.S
5577@@ -192,7 +192,7 @@ SECTIONS {
5578 /* Per-cpu data: */
5579 . = ALIGN(PERCPU_PAGE_SIZE);
5580 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5581- __phys_per_cpu_start = __per_cpu_load;
5582+ __phys_per_cpu_start = per_cpu_load;
5583 /*
5584 * ensure percpu data fits
5585 * into percpu page size
5586diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5587index 7225dad..2a7c8256 100644
5588--- a/arch/ia64/mm/fault.c
5589+++ b/arch/ia64/mm/fault.c
5590@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5591 return pte_present(pte);
5592 }
5593
5594+#ifdef CONFIG_PAX_PAGEEXEC
5595+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5596+{
5597+ unsigned long i;
5598+
5599+ printk(KERN_ERR "PAX: bytes at PC: ");
5600+ for (i = 0; i < 8; i++) {
5601+ unsigned int c;
5602+ if (get_user(c, (unsigned int *)pc+i))
5603+ printk(KERN_CONT "???????? ");
5604+ else
5605+ printk(KERN_CONT "%08x ", c);
5606+ }
5607+ printk("\n");
5608+}
5609+#endif
5610+
5611 # define VM_READ_BIT 0
5612 # define VM_WRITE_BIT 1
5613 # define VM_EXEC_BIT 2
5614@@ -151,8 +168,21 @@ retry:
5615 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5616 goto bad_area;
5617
5618- if ((vma->vm_flags & mask) != mask)
5619+ if ((vma->vm_flags & mask) != mask) {
5620+
5621+#ifdef CONFIG_PAX_PAGEEXEC
5622+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5623+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5624+ goto bad_area;
5625+
5626+ up_read(&mm->mmap_sem);
5627+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5628+ do_group_exit(SIGKILL);
5629+ }
5630+#endif
5631+
5632 goto bad_area;
5633+ }
5634
5635 /*
5636 * If for any reason at all we couldn't handle the fault, make
5637diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5638index 76069c1..c2aa816 100644
5639--- a/arch/ia64/mm/hugetlbpage.c
5640+++ b/arch/ia64/mm/hugetlbpage.c
5641@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5642 unsigned long pgoff, unsigned long flags)
5643 {
5644 struct vm_unmapped_area_info info;
5645+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5646
5647 if (len > RGN_MAP_LIMIT)
5648 return -ENOMEM;
5649@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5650 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5651 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5652 info.align_offset = 0;
5653+ info.threadstack_offset = offset;
5654 return vm_unmapped_area(&info);
5655 }
5656
5657diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5658index 6b33457..88b5124 100644
5659--- a/arch/ia64/mm/init.c
5660+++ b/arch/ia64/mm/init.c
5661@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5662 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5663 vma->vm_end = vma->vm_start + PAGE_SIZE;
5664 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5665+
5666+#ifdef CONFIG_PAX_PAGEEXEC
5667+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5668+ vma->vm_flags &= ~VM_EXEC;
5669+
5670+#ifdef CONFIG_PAX_MPROTECT
5671+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5672+ vma->vm_flags &= ~VM_MAYEXEC;
5673+#endif
5674+
5675+ }
5676+#endif
5677+
5678 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5679 down_write(&current->mm->mmap_sem);
5680 if (insert_vm_struct(current->mm, vma)) {
5681@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5682 gate_vma.vm_start = FIXADDR_USER_START;
5683 gate_vma.vm_end = FIXADDR_USER_END;
5684 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5685- gate_vma.vm_page_prot = __P101;
5686+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5687
5688 return 0;
5689 }
5690diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5691index 40b3ee9..8c2c112 100644
5692--- a/arch/m32r/include/asm/cache.h
5693+++ b/arch/m32r/include/asm/cache.h
5694@@ -1,8 +1,10 @@
5695 #ifndef _ASM_M32R_CACHE_H
5696 #define _ASM_M32R_CACHE_H
5697
5698+#include <linux/const.h>
5699+
5700 /* L1 cache line size */
5701 #define L1_CACHE_SHIFT 4
5702-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5703+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5704
5705 #endif /* _ASM_M32R_CACHE_H */
5706diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5707index 82abd15..d95ae5d 100644
5708--- a/arch/m32r/lib/usercopy.c
5709+++ b/arch/m32r/lib/usercopy.c
5710@@ -14,6 +14,9 @@
5711 unsigned long
5712 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5713 {
5714+ if ((long)n < 0)
5715+ return n;
5716+
5717 prefetch(from);
5718 if (access_ok(VERIFY_WRITE, to, n))
5719 __copy_user(to,from,n);
5720@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5721 unsigned long
5722 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5723 {
5724+ if ((long)n < 0)
5725+ return n;
5726+
5727 prefetchw(to);
5728 if (access_ok(VERIFY_READ, from, n))
5729 __copy_user_zeroing(to,from,n);
5730diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5731index 0395c51..5f26031 100644
5732--- a/arch/m68k/include/asm/cache.h
5733+++ b/arch/m68k/include/asm/cache.h
5734@@ -4,9 +4,11 @@
5735 #ifndef __ARCH_M68K_CACHE_H
5736 #define __ARCH_M68K_CACHE_H
5737
5738+#include <linux/const.h>
5739+
5740 /* bytes per L1 cache line */
5741 #define L1_CACHE_SHIFT 4
5742-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5743+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5744
5745 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5746
5747diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5748index c7591e8..ecef036 100644
5749--- a/arch/metag/include/asm/barrier.h
5750+++ b/arch/metag/include/asm/barrier.h
5751@@ -89,7 +89,7 @@ static inline void fence(void)
5752 do { \
5753 compiletime_assert_atomic_type(*p); \
5754 smp_mb(); \
5755- ACCESS_ONCE(*p) = (v); \
5756+ ACCESS_ONCE_RW(*p) = (v); \
5757 } while (0)
5758
5759 #define smp_load_acquire(p) \
5760diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5761index 3c32075..ae0ae75 100644
5762--- a/arch/metag/mm/hugetlbpage.c
5763+++ b/arch/metag/mm/hugetlbpage.c
5764@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5765 info.high_limit = TASK_SIZE;
5766 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5767 info.align_offset = 0;
5768+ info.threadstack_offset = 0;
5769 return vm_unmapped_area(&info);
5770 }
5771
5772diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5773index 4efe96a..60e8699 100644
5774--- a/arch/microblaze/include/asm/cache.h
5775+++ b/arch/microblaze/include/asm/cache.h
5776@@ -13,11 +13,12 @@
5777 #ifndef _ASM_MICROBLAZE_CACHE_H
5778 #define _ASM_MICROBLAZE_CACHE_H
5779
5780+#include <linux/const.h>
5781 #include <asm/registers.h>
5782
5783 #define L1_CACHE_SHIFT 5
5784 /* word-granular cache in microblaze */
5785-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5786+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5787
5788 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5789
5790diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5791index 574c430..470200d 100644
5792--- a/arch/mips/Kconfig
5793+++ b/arch/mips/Kconfig
5794@@ -2399,6 +2399,7 @@ source "kernel/Kconfig.preempt"
5795
5796 config KEXEC
5797 bool "Kexec system call"
5798+ depends on !GRKERNSEC_KMEM
5799 help
5800 kexec is a system call that implements the ability to shutdown your
5801 current kernel, and to start another kernel. It is like a reboot
5802diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5803index 02f2444..506969c 100644
5804--- a/arch/mips/cavium-octeon/dma-octeon.c
5805+++ b/arch/mips/cavium-octeon/dma-octeon.c
5806@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5807 if (dma_release_from_coherent(dev, order, vaddr))
5808 return;
5809
5810- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5811+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5812 }
5813
5814 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5815diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5816index 37b2bef..02122b8 100644
5817--- a/arch/mips/include/asm/atomic.h
5818+++ b/arch/mips/include/asm/atomic.h
5819@@ -21,15 +21,39 @@
5820 #include <asm/cmpxchg.h>
5821 #include <asm/war.h>
5822
5823+#ifdef CONFIG_GENERIC_ATOMIC64
5824+#include <asm-generic/atomic64.h>
5825+#endif
5826+
5827 #define ATOMIC_INIT(i) { (i) }
5828
5829+#ifdef CONFIG_64BIT
5830+#define _ASM_EXTABLE(from, to) \
5831+" .section __ex_table,\"a\"\n" \
5832+" .dword " #from ", " #to"\n" \
5833+" .previous\n"
5834+#else
5835+#define _ASM_EXTABLE(from, to) \
5836+" .section __ex_table,\"a\"\n" \
5837+" .word " #from ", " #to"\n" \
5838+" .previous\n"
5839+#endif
5840+
5841 /*
5842 * atomic_read - read atomic variable
5843 * @v: pointer of type atomic_t
5844 *
5845 * Atomically reads the value of @v.
5846 */
5847-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5848+static inline int atomic_read(const atomic_t *v)
5849+{
5850+ return (*(volatile const int *) &v->counter);
5851+}
5852+
5853+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5854+{
5855+ return (*(volatile const int *) &v->counter);
5856+}
5857
5858 /*
5859 * atomic_set - set atomic variable
5860@@ -38,7 +62,15 @@
5861 *
5862 * Atomically sets the value of @v to @i.
5863 */
5864-#define atomic_set(v, i) ((v)->counter = (i))
5865+static inline void atomic_set(atomic_t *v, int i)
5866+{
5867+ v->counter = i;
5868+}
5869+
5870+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5871+{
5872+ v->counter = i;
5873+}
5874
5875 /*
5876 * atomic_add - add integer to atomic variable
5877@@ -47,7 +79,67 @@
5878 *
5879 * Atomically adds @i to @v.
5880 */
5881-static __inline__ void atomic_add(int i, atomic_t * v)
5882+static __inline__ void atomic_add(int i, atomic_t *v)
5883+{
5884+ int temp;
5885+
5886+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5887+ __asm__ __volatile__(
5888+ " .set mips3 \n"
5889+ "1: ll %0, %1 # atomic_add \n"
5890+#ifdef CONFIG_PAX_REFCOUNT
5891+ /* Exception on overflow. */
5892+ "2: add %0, %2 \n"
5893+#else
5894+ " addu %0, %2 \n"
5895+#endif
5896+ " sc %0, %1 \n"
5897+ " beqzl %0, 1b \n"
5898+#ifdef CONFIG_PAX_REFCOUNT
5899+ "3: \n"
5900+ _ASM_EXTABLE(2b, 3b)
5901+#endif
5902+ " .set mips0 \n"
5903+ : "=&r" (temp), "+m" (v->counter)
5904+ : "Ir" (i));
5905+ } else if (kernel_uses_llsc) {
5906+ __asm__ __volatile__(
5907+ " .set mips3 \n"
5908+ "1: ll %0, %1 # atomic_add \n"
5909+#ifdef CONFIG_PAX_REFCOUNT
5910+ /* Exception on overflow. */
5911+ "2: add %0, %2 \n"
5912+#else
5913+ " addu %0, %2 \n"
5914+#endif
5915+ " sc %0, %1 \n"
5916+ " beqz %0, 1b \n"
5917+#ifdef CONFIG_PAX_REFCOUNT
5918+ "3: \n"
5919+ _ASM_EXTABLE(2b, 3b)
5920+#endif
5921+ " .set mips0 \n"
5922+ : "=&r" (temp), "+m" (v->counter)
5923+ : "Ir" (i));
5924+ } else {
5925+ unsigned long flags;
5926+
5927+ raw_local_irq_save(flags);
5928+ __asm__ __volatile__(
5929+#ifdef CONFIG_PAX_REFCOUNT
5930+ /* Exception on overflow. */
5931+ "1: add %0, %1 \n"
5932+ "2: \n"
5933+ _ASM_EXTABLE(1b, 2b)
5934+#else
5935+ " addu %0, %1 \n"
5936+#endif
5937+ : "+r" (v->counter) : "Ir" (i));
5938+ raw_local_irq_restore(flags);
5939+ }
5940+}
5941+
5942+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5943 {
5944 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5945 int temp;
5946@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5947 *
5948 * Atomically subtracts @i from @v.
5949 */
5950-static __inline__ void atomic_sub(int i, atomic_t * v)
5951+static __inline__ void atomic_sub(int i, atomic_t *v)
5952+{
5953+ int temp;
5954+
5955+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5956+ __asm__ __volatile__(
5957+ " .set mips3 \n"
5958+ "1: ll %0, %1 # atomic64_sub \n"
5959+#ifdef CONFIG_PAX_REFCOUNT
5960+ /* Exception on overflow. */
5961+ "2: sub %0, %2 \n"
5962+#else
5963+ " subu %0, %2 \n"
5964+#endif
5965+ " sc %0, %1 \n"
5966+ " beqzl %0, 1b \n"
5967+#ifdef CONFIG_PAX_REFCOUNT
5968+ "3: \n"
5969+ _ASM_EXTABLE(2b, 3b)
5970+#endif
5971+ " .set mips0 \n"
5972+ : "=&r" (temp), "+m" (v->counter)
5973+ : "Ir" (i));
5974+ } else if (kernel_uses_llsc) {
5975+ __asm__ __volatile__(
5976+ " .set mips3 \n"
5977+ "1: ll %0, %1 # atomic64_sub \n"
5978+#ifdef CONFIG_PAX_REFCOUNT
5979+ /* Exception on overflow. */
5980+ "2: sub %0, %2 \n"
5981+#else
5982+ " subu %0, %2 \n"
5983+#endif
5984+ " sc %0, %1 \n"
5985+ " beqz %0, 1b \n"
5986+#ifdef CONFIG_PAX_REFCOUNT
5987+ "3: \n"
5988+ _ASM_EXTABLE(2b, 3b)
5989+#endif
5990+ " .set mips0 \n"
5991+ : "=&r" (temp), "+m" (v->counter)
5992+ : "Ir" (i));
5993+ } else {
5994+ unsigned long flags;
5995+
5996+ raw_local_irq_save(flags);
5997+ __asm__ __volatile__(
5998+#ifdef CONFIG_PAX_REFCOUNT
5999+ /* Exception on overflow. */
6000+ "1: sub %0, %1 \n"
6001+ "2: \n"
6002+ _ASM_EXTABLE(1b, 2b)
6003+#else
6004+ " subu %0, %1 \n"
6005+#endif
6006+ : "+r" (v->counter) : "Ir" (i));
6007+ raw_local_irq_restore(flags);
6008+ }
6009+}
6010+
6011+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
6012 {
6013 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6014 int temp;
6015@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6016 /*
6017 * Same as above, but return the result value
6018 */
6019-static __inline__ int atomic_add_return(int i, atomic_t * v)
6020+static __inline__ int atomic_add_return(int i, atomic_t *v)
6021+{
6022+ int result;
6023+ int temp;
6024+
6025+ smp_mb__before_llsc();
6026+
6027+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6028+ __asm__ __volatile__(
6029+ " .set mips3 \n"
6030+ "1: ll %1, %2 # atomic_add_return \n"
6031+#ifdef CONFIG_PAX_REFCOUNT
6032+ "2: add %0, %1, %3 \n"
6033+#else
6034+ " addu %0, %1, %3 \n"
6035+#endif
6036+ " sc %0, %2 \n"
6037+ " beqzl %0, 1b \n"
6038+#ifdef CONFIG_PAX_REFCOUNT
6039+ " b 4f \n"
6040+ " .set noreorder \n"
6041+ "3: b 5f \n"
6042+ " move %0, %1 \n"
6043+ " .set reorder \n"
6044+ _ASM_EXTABLE(2b, 3b)
6045+#endif
6046+ "4: addu %0, %1, %3 \n"
6047+#ifdef CONFIG_PAX_REFCOUNT
6048+ "5: \n"
6049+#endif
6050+ " .set mips0 \n"
6051+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6052+ : "Ir" (i));
6053+ } else if (kernel_uses_llsc) {
6054+ __asm__ __volatile__(
6055+ " .set mips3 \n"
6056+ "1: ll %1, %2 # atomic_add_return \n"
6057+#ifdef CONFIG_PAX_REFCOUNT
6058+ "2: add %0, %1, %3 \n"
6059+#else
6060+ " addu %0, %1, %3 \n"
6061+#endif
6062+ " sc %0, %2 \n"
6063+ " bnez %0, 4f \n"
6064+ " b 1b \n"
6065+#ifdef CONFIG_PAX_REFCOUNT
6066+ " .set noreorder \n"
6067+ "3: b 5f \n"
6068+ " move %0, %1 \n"
6069+ " .set reorder \n"
6070+ _ASM_EXTABLE(2b, 3b)
6071+#endif
6072+ "4: addu %0, %1, %3 \n"
6073+#ifdef CONFIG_PAX_REFCOUNT
6074+ "5: \n"
6075+#endif
6076+ " .set mips0 \n"
6077+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6078+ : "Ir" (i));
6079+ } else {
6080+ unsigned long flags;
6081+
6082+ raw_local_irq_save(flags);
6083+ __asm__ __volatile__(
6084+ " lw %0, %1 \n"
6085+#ifdef CONFIG_PAX_REFCOUNT
6086+ /* Exception on overflow. */
6087+ "1: add %0, %2 \n"
6088+#else
6089+ " addu %0, %2 \n"
6090+#endif
6091+ " sw %0, %1 \n"
6092+#ifdef CONFIG_PAX_REFCOUNT
6093+ /* Note: Dest reg is not modified on overflow */
6094+ "2: \n"
6095+ _ASM_EXTABLE(1b, 2b)
6096+#endif
6097+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6098+ raw_local_irq_restore(flags);
6099+ }
6100+
6101+ smp_llsc_mb();
6102+
6103+ return result;
6104+}
6105+
6106+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6107 {
6108 int result;
6109
6110@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6111 return result;
6112 }
6113
6114-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6115+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6116+{
6117+ int result;
6118+ int temp;
6119+
6120+ smp_mb__before_llsc();
6121+
6122+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6123+ __asm__ __volatile__(
6124+ " .set mips3 \n"
6125+ "1: ll %1, %2 # atomic_sub_return \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ "2: sub %0, %1, %3 \n"
6128+#else
6129+ " subu %0, %1, %3 \n"
6130+#endif
6131+ " sc %0, %2 \n"
6132+ " beqzl %0, 1b \n"
6133+#ifdef CONFIG_PAX_REFCOUNT
6134+ " b 4f \n"
6135+ " .set noreorder \n"
6136+ "3: b 5f \n"
6137+ " move %0, %1 \n"
6138+ " .set reorder \n"
6139+ _ASM_EXTABLE(2b, 3b)
6140+#endif
6141+ "4: subu %0, %1, %3 \n"
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "5: \n"
6144+#endif
6145+ " .set mips0 \n"
6146+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6147+ : "Ir" (i), "m" (v->counter)
6148+ : "memory");
6149+ } else if (kernel_uses_llsc) {
6150+ __asm__ __volatile__(
6151+ " .set mips3 \n"
6152+ "1: ll %1, %2 # atomic_sub_return \n"
6153+#ifdef CONFIG_PAX_REFCOUNT
6154+ "2: sub %0, %1, %3 \n"
6155+#else
6156+ " subu %0, %1, %3 \n"
6157+#endif
6158+ " sc %0, %2 \n"
6159+ " bnez %0, 4f \n"
6160+ " b 1b \n"
6161+#ifdef CONFIG_PAX_REFCOUNT
6162+ " .set noreorder \n"
6163+ "3: b 5f \n"
6164+ " move %0, %1 \n"
6165+ " .set reorder \n"
6166+ _ASM_EXTABLE(2b, 3b)
6167+#endif
6168+ "4: subu %0, %1, %3 \n"
6169+#ifdef CONFIG_PAX_REFCOUNT
6170+ "5: \n"
6171+#endif
6172+ " .set mips0 \n"
6173+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6174+ : "Ir" (i));
6175+ } else {
6176+ unsigned long flags;
6177+
6178+ raw_local_irq_save(flags);
6179+ __asm__ __volatile__(
6180+ " lw %0, %1 \n"
6181+#ifdef CONFIG_PAX_REFCOUNT
6182+ /* Exception on overflow. */
6183+ "1: sub %0, %2 \n"
6184+#else
6185+ " subu %0, %2 \n"
6186+#endif
6187+ " sw %0, %1 \n"
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ /* Note: Dest reg is not modified on overflow */
6190+ "2: \n"
6191+ _ASM_EXTABLE(1b, 2b)
6192+#endif
6193+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6194+ raw_local_irq_restore(flags);
6195+ }
6196+
6197+ smp_llsc_mb();
6198+
6199+ return result;
6200+}
6201+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6202 {
6203 int result;
6204
6205@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6206 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6207 * The function returns the old value of @v minus @i.
6208 */
6209-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6210+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6211 {
6212 int result;
6213
6214@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6215 return result;
6216 }
6217
6218-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6219-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6220+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6221+{
6222+ return cmpxchg(&v->counter, old, new);
6223+}
6224+
6225+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6226+ int new)
6227+{
6228+ return cmpxchg(&(v->counter), old, new);
6229+}
6230+
6231+static inline int atomic_xchg(atomic_t *v, int new)
6232+{
6233+ return xchg(&v->counter, new);
6234+}
6235+
6236+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6237+{
6238+ return xchg(&(v->counter), new);
6239+}
6240
6241 /**
6242 * __atomic_add_unless - add unless the number is a given value
6243@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6244
6245 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6246 #define atomic_inc_return(v) atomic_add_return(1, (v))
6247+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6248+{
6249+ return atomic_add_return_unchecked(1, v);
6250+}
6251
6252 /*
6253 * atomic_sub_and_test - subtract value from variable and test result
6254@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6255 * other cases.
6256 */
6257 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6258+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6259+{
6260+ return atomic_add_return_unchecked(1, v) == 0;
6261+}
6262
6263 /*
6264 * atomic_dec_and_test - decrement by 1 and test
6265@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6266 * Atomically increments @v by 1.
6267 */
6268 #define atomic_inc(v) atomic_add(1, (v))
6269+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6270+{
6271+ atomic_add_unchecked(1, v);
6272+}
6273
6274 /*
6275 * atomic_dec - decrement and test
6276@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6277 * Atomically decrements @v by 1.
6278 */
6279 #define atomic_dec(v) atomic_sub(1, (v))
6280+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6281+{
6282+ atomic_sub_unchecked(1, v);
6283+}
6284
6285 /*
6286 * atomic_add_negative - add and test if negative
6287@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6288 * @v: pointer of type atomic64_t
6289 *
6290 */
6291-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6292+static inline long atomic64_read(const atomic64_t *v)
6293+{
6294+ return (*(volatile const long *) &v->counter);
6295+}
6296+
6297+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6298+{
6299+ return (*(volatile const long *) &v->counter);
6300+}
6301
6302 /*
6303 * atomic64_set - set atomic variable
6304 * @v: pointer of type atomic64_t
6305 * @i: required value
6306 */
6307-#define atomic64_set(v, i) ((v)->counter = (i))
6308+static inline void atomic64_set(atomic64_t *v, long i)
6309+{
6310+ v->counter = i;
6311+}
6312+
6313+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6314+{
6315+ v->counter = i;
6316+}
6317
6318 /*
6319 * atomic64_add - add integer to atomic variable
6320@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6321 *
6322 * Atomically adds @i to @v.
6323 */
6324-static __inline__ void atomic64_add(long i, atomic64_t * v)
6325+static __inline__ void atomic64_add(long i, atomic64_t *v)
6326+{
6327+ long temp;
6328+
6329+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6330+ __asm__ __volatile__(
6331+ " .set mips3 \n"
6332+ "1: lld %0, %1 # atomic64_add \n"
6333+#ifdef CONFIG_PAX_REFCOUNT
6334+ /* Exception on overflow. */
6335+ "2: dadd %0, %2 \n"
6336+#else
6337+ " daddu %0, %2 \n"
6338+#endif
6339+ " scd %0, %1 \n"
6340+ " beqzl %0, 1b \n"
6341+#ifdef CONFIG_PAX_REFCOUNT
6342+ "3: \n"
6343+ _ASM_EXTABLE(2b, 3b)
6344+#endif
6345+ " .set mips0 \n"
6346+ : "=&r" (temp), "+m" (v->counter)
6347+ : "Ir" (i));
6348+ } else if (kernel_uses_llsc) {
6349+ __asm__ __volatile__(
6350+ " .set mips3 \n"
6351+ "1: lld %0, %1 # atomic64_add \n"
6352+#ifdef CONFIG_PAX_REFCOUNT
6353+ /* Exception on overflow. */
6354+ "2: dadd %0, %2 \n"
6355+#else
6356+ " daddu %0, %2 \n"
6357+#endif
6358+ " scd %0, %1 \n"
6359+ " beqz %0, 1b \n"
6360+#ifdef CONFIG_PAX_REFCOUNT
6361+ "3: \n"
6362+ _ASM_EXTABLE(2b, 3b)
6363+#endif
6364+ " .set mips0 \n"
6365+ : "=&r" (temp), "+m" (v->counter)
6366+ : "Ir" (i));
6367+ } else {
6368+ unsigned long flags;
6369+
6370+ raw_local_irq_save(flags);
6371+ __asm__ __volatile__(
6372+#ifdef CONFIG_PAX_REFCOUNT
6373+ /* Exception on overflow. */
6374+ "1: dadd %0, %1 \n"
6375+ "2: \n"
6376+ _ASM_EXTABLE(1b, 2b)
6377+#else
6378+ " daddu %0, %1 \n"
6379+#endif
6380+ : "+r" (v->counter) : "Ir" (i));
6381+ raw_local_irq_restore(flags);
6382+ }
6383+}
6384+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6385 {
6386 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6387 long temp;
6388@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6389 *
6390 * Atomically subtracts @i from @v.
6391 */
6392-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6393+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6394+{
6395+ long temp;
6396+
6397+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6398+ __asm__ __volatile__(
6399+ " .set mips3 \n"
6400+ "1: lld %0, %1 # atomic64_sub \n"
6401+#ifdef CONFIG_PAX_REFCOUNT
6402+ /* Exception on overflow. */
6403+ "2: dsub %0, %2 \n"
6404+#else
6405+ " dsubu %0, %2 \n"
6406+#endif
6407+ " scd %0, %1 \n"
6408+ " beqzl %0, 1b \n"
6409+#ifdef CONFIG_PAX_REFCOUNT
6410+ "3: \n"
6411+ _ASM_EXTABLE(2b, 3b)
6412+#endif
6413+ " .set mips0 \n"
6414+ : "=&r" (temp), "+m" (v->counter)
6415+ : "Ir" (i));
6416+ } else if (kernel_uses_llsc) {
6417+ __asm__ __volatile__(
6418+ " .set mips3 \n"
6419+ "1: lld %0, %1 # atomic64_sub \n"
6420+#ifdef CONFIG_PAX_REFCOUNT
6421+ /* Exception on overflow. */
6422+ "2: dsub %0, %2 \n"
6423+#else
6424+ " dsubu %0, %2 \n"
6425+#endif
6426+ " scd %0, %1 \n"
6427+ " beqz %0, 1b \n"
6428+#ifdef CONFIG_PAX_REFCOUNT
6429+ "3: \n"
6430+ _ASM_EXTABLE(2b, 3b)
6431+#endif
6432+ " .set mips0 \n"
6433+ : "=&r" (temp), "+m" (v->counter)
6434+ : "Ir" (i));
6435+ } else {
6436+ unsigned long flags;
6437+
6438+ raw_local_irq_save(flags);
6439+ __asm__ __volatile__(
6440+#ifdef CONFIG_PAX_REFCOUNT
6441+ /* Exception on overflow. */
6442+ "1: dsub %0, %1 \n"
6443+ "2: \n"
6444+ _ASM_EXTABLE(1b, 2b)
6445+#else
6446+ " dsubu %0, %1 \n"
6447+#endif
6448+ : "+r" (v->counter) : "Ir" (i));
6449+ raw_local_irq_restore(flags);
6450+ }
6451+}
6452+
6453+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6454 {
6455 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6456 long temp;
6457@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6458 /*
6459 * Same as above, but return the result value
6460 */
6461-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6462+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6463+{
6464+ long result;
6465+ long temp;
6466+
6467+ smp_mb__before_llsc();
6468+
6469+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6470+ __asm__ __volatile__(
6471+ " .set mips3 \n"
6472+ "1: lld %1, %2 # atomic64_add_return \n"
6473+#ifdef CONFIG_PAX_REFCOUNT
6474+ "2: dadd %0, %1, %3 \n"
6475+#else
6476+ " daddu %0, %1, %3 \n"
6477+#endif
6478+ " scd %0, %2 \n"
6479+ " beqzl %0, 1b \n"
6480+#ifdef CONFIG_PAX_REFCOUNT
6481+ " b 4f \n"
6482+ " .set noreorder \n"
6483+ "3: b 5f \n"
6484+ " move %0, %1 \n"
6485+ " .set reorder \n"
6486+ _ASM_EXTABLE(2b, 3b)
6487+#endif
6488+ "4: daddu %0, %1, %3 \n"
6489+#ifdef CONFIG_PAX_REFCOUNT
6490+ "5: \n"
6491+#endif
6492+ " .set mips0 \n"
6493+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6494+ : "Ir" (i));
6495+ } else if (kernel_uses_llsc) {
6496+ __asm__ __volatile__(
6497+ " .set mips3 \n"
6498+ "1: lld %1, %2 # atomic64_add_return \n"
6499+#ifdef CONFIG_PAX_REFCOUNT
6500+ "2: dadd %0, %1, %3 \n"
6501+#else
6502+ " daddu %0, %1, %3 \n"
6503+#endif
6504+ " scd %0, %2 \n"
6505+ " bnez %0, 4f \n"
6506+ " b 1b \n"
6507+#ifdef CONFIG_PAX_REFCOUNT
6508+ " .set noreorder \n"
6509+ "3: b 5f \n"
6510+ " move %0, %1 \n"
6511+ " .set reorder \n"
6512+ _ASM_EXTABLE(2b, 3b)
6513+#endif
6514+ "4: daddu %0, %1, %3 \n"
6515+#ifdef CONFIG_PAX_REFCOUNT
6516+ "5: \n"
6517+#endif
6518+ " .set mips0 \n"
6519+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6520+ : "Ir" (i), "m" (v->counter)
6521+ : "memory");
6522+ } else {
6523+ unsigned long flags;
6524+
6525+ raw_local_irq_save(flags);
6526+ __asm__ __volatile__(
6527+ " ld %0, %1 \n"
6528+#ifdef CONFIG_PAX_REFCOUNT
6529+ /* Exception on overflow. */
6530+ "1: dadd %0, %2 \n"
6531+#else
6532+ " daddu %0, %2 \n"
6533+#endif
6534+ " sd %0, %1 \n"
6535+#ifdef CONFIG_PAX_REFCOUNT
6536+ /* Note: Dest reg is not modified on overflow */
6537+ "2: \n"
6538+ _ASM_EXTABLE(1b, 2b)
6539+#endif
6540+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6541+ raw_local_irq_restore(flags);
6542+ }
6543+
6544+ smp_llsc_mb();
6545+
6546+ return result;
6547+}
6548+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6549 {
6550 long result;
6551
6552@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6553 return result;
6554 }
6555
6556-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6557+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6558+{
6559+ long result;
6560+ long temp;
6561+
6562+ smp_mb__before_llsc();
6563+
6564+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6565+ long temp;
6566+
6567+ __asm__ __volatile__(
6568+ " .set mips3 \n"
6569+ "1: lld %1, %2 # atomic64_sub_return \n"
6570+#ifdef CONFIG_PAX_REFCOUNT
6571+ "2: dsub %0, %1, %3 \n"
6572+#else
6573+ " dsubu %0, %1, %3 \n"
6574+#endif
6575+ " scd %0, %2 \n"
6576+ " beqzl %0, 1b \n"
6577+#ifdef CONFIG_PAX_REFCOUNT
6578+ " b 4f \n"
6579+ " .set noreorder \n"
6580+ "3: b 5f \n"
6581+ " move %0, %1 \n"
6582+ " .set reorder \n"
6583+ _ASM_EXTABLE(2b, 3b)
6584+#endif
6585+ "4: dsubu %0, %1, %3 \n"
6586+#ifdef CONFIG_PAX_REFCOUNT
6587+ "5: \n"
6588+#endif
6589+ " .set mips0 \n"
6590+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6591+ : "Ir" (i), "m" (v->counter)
6592+ : "memory");
6593+ } else if (kernel_uses_llsc) {
6594+ __asm__ __volatile__(
6595+ " .set mips3 \n"
6596+ "1: lld %1, %2 # atomic64_sub_return \n"
6597+#ifdef CONFIG_PAX_REFCOUNT
6598+ "2: dsub %0, %1, %3 \n"
6599+#else
6600+ " dsubu %0, %1, %3 \n"
6601+#endif
6602+ " scd %0, %2 \n"
6603+ " bnez %0, 4f \n"
6604+ " b 1b \n"
6605+#ifdef CONFIG_PAX_REFCOUNT
6606+ " .set noreorder \n"
6607+ "3: b 5f \n"
6608+ " move %0, %1 \n"
6609+ " .set reorder \n"
6610+ _ASM_EXTABLE(2b, 3b)
6611+#endif
6612+ "4: dsubu %0, %1, %3 \n"
6613+#ifdef CONFIG_PAX_REFCOUNT
6614+ "5: \n"
6615+#endif
6616+ " .set mips0 \n"
6617+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6618+ : "Ir" (i), "m" (v->counter)
6619+ : "memory");
6620+ } else {
6621+ unsigned long flags;
6622+
6623+ raw_local_irq_save(flags);
6624+ __asm__ __volatile__(
6625+ " ld %0, %1 \n"
6626+#ifdef CONFIG_PAX_REFCOUNT
6627+ /* Exception on overflow. */
6628+ "1: dsub %0, %2 \n"
6629+#else
6630+ " dsubu %0, %2 \n"
6631+#endif
6632+ " sd %0, %1 \n"
6633+#ifdef CONFIG_PAX_REFCOUNT
6634+ /* Note: Dest reg is not modified on overflow */
6635+ "2: \n"
6636+ _ASM_EXTABLE(1b, 2b)
6637+#endif
6638+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6639+ raw_local_irq_restore(flags);
6640+ }
6641+
6642+ smp_llsc_mb();
6643+
6644+ return result;
6645+}
6646+
6647+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6648 {
6649 long result;
6650
6651@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6652 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6653 * The function returns the old value of @v minus @i.
6654 */
6655-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6656+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6657 {
6658 long result;
6659
6660@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6661 return result;
6662 }
6663
6664-#define atomic64_cmpxchg(v, o, n) \
6665- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6666-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6667+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6668+{
6669+ return cmpxchg(&v->counter, old, new);
6670+}
6671+
6672+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6673+ long new)
6674+{
6675+ return cmpxchg(&(v->counter), old, new);
6676+}
6677+
6678+static inline long atomic64_xchg(atomic64_t *v, long new)
6679+{
6680+ return xchg(&v->counter, new);
6681+}
6682+
6683+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6684+{
6685+ return xchg(&(v->counter), new);
6686+}
6687
6688 /**
6689 * atomic64_add_unless - add unless the number is a given value
6690@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6691
6692 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6693 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6694+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6695
6696 /*
6697 * atomic64_sub_and_test - subtract value from variable and test result
6698@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6699 * other cases.
6700 */
6701 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6702+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6703
6704 /*
6705 * atomic64_dec_and_test - decrement by 1 and test
6706@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6707 * Atomically increments @v by 1.
6708 */
6709 #define atomic64_inc(v) atomic64_add(1, (v))
6710+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6711
6712 /*
6713 * atomic64_dec - decrement and test
6714@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6715 * Atomically decrements @v by 1.
6716 */
6717 #define atomic64_dec(v) atomic64_sub(1, (v))
6718+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6719
6720 /*
6721 * atomic64_add_negative - add and test if negative
6722diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6723index d0101dd..266982c 100644
6724--- a/arch/mips/include/asm/barrier.h
6725+++ b/arch/mips/include/asm/barrier.h
6726@@ -184,7 +184,7 @@
6727 do { \
6728 compiletime_assert_atomic_type(*p); \
6729 smp_mb(); \
6730- ACCESS_ONCE(*p) = (v); \
6731+ ACCESS_ONCE_RW(*p) = (v); \
6732 } while (0)
6733
6734 #define smp_load_acquire(p) \
6735diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6736index b4db69f..8f3b093 100644
6737--- a/arch/mips/include/asm/cache.h
6738+++ b/arch/mips/include/asm/cache.h
6739@@ -9,10 +9,11 @@
6740 #ifndef _ASM_CACHE_H
6741 #define _ASM_CACHE_H
6742
6743+#include <linux/const.h>
6744 #include <kmalloc.h>
6745
6746 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6747-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6748+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6749
6750 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6751 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6752diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6753index 1d38fe0..9beabc9 100644
6754--- a/arch/mips/include/asm/elf.h
6755+++ b/arch/mips/include/asm/elf.h
6756@@ -381,13 +381,16 @@ extern const char *__elf_platform;
6757 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6758 #endif
6759
6760+#ifdef CONFIG_PAX_ASLR
6761+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6762+
6763+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6764+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6765+#endif
6766+
6767 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6768 struct linux_binprm;
6769 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6770 int uses_interp);
6771
6772-struct mm_struct;
6773-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6774-#define arch_randomize_brk arch_randomize_brk
6775-
6776 #endif /* _ASM_ELF_H */
6777diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6778index c1f6afa..38cc6e9 100644
6779--- a/arch/mips/include/asm/exec.h
6780+++ b/arch/mips/include/asm/exec.h
6781@@ -12,6 +12,6 @@
6782 #ifndef _ASM_EXEC_H
6783 #define _ASM_EXEC_H
6784
6785-extern unsigned long arch_align_stack(unsigned long sp);
6786+#define arch_align_stack(x) ((x) & ~0xfUL)
6787
6788 #endif /* _ASM_EXEC_H */
6789diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6790index 9e8ef59..1139d6b 100644
6791--- a/arch/mips/include/asm/hw_irq.h
6792+++ b/arch/mips/include/asm/hw_irq.h
6793@@ -10,7 +10,7 @@
6794
6795 #include <linux/atomic.h>
6796
6797-extern atomic_t irq_err_count;
6798+extern atomic_unchecked_t irq_err_count;
6799
6800 /*
6801 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6802diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6803index 46dfc3c..a16b13a 100644
6804--- a/arch/mips/include/asm/local.h
6805+++ b/arch/mips/include/asm/local.h
6806@@ -12,15 +12,25 @@ typedef struct
6807 atomic_long_t a;
6808 } local_t;
6809
6810+typedef struct {
6811+ atomic_long_unchecked_t a;
6812+} local_unchecked_t;
6813+
6814 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6815
6816 #define local_read(l) atomic_long_read(&(l)->a)
6817+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6818 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6819+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6820
6821 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6822+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6823 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6824+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6825 #define local_inc(l) atomic_long_inc(&(l)->a)
6826+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6827 #define local_dec(l) atomic_long_dec(&(l)->a)
6828+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6829
6830 /*
6831 * Same as above, but return the result value
6832@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6833 return result;
6834 }
6835
6836+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6837+{
6838+ unsigned long result;
6839+
6840+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6841+ unsigned long temp;
6842+
6843+ __asm__ __volatile__(
6844+ " .set mips3 \n"
6845+ "1:" __LL "%1, %2 # local_add_return \n"
6846+ " addu %0, %1, %3 \n"
6847+ __SC "%0, %2 \n"
6848+ " beqzl %0, 1b \n"
6849+ " addu %0, %1, %3 \n"
6850+ " .set mips0 \n"
6851+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6852+ : "Ir" (i), "m" (l->a.counter)
6853+ : "memory");
6854+ } else if (kernel_uses_llsc) {
6855+ unsigned long temp;
6856+
6857+ __asm__ __volatile__(
6858+ " .set mips3 \n"
6859+ "1:" __LL "%1, %2 # local_add_return \n"
6860+ " addu %0, %1, %3 \n"
6861+ __SC "%0, %2 \n"
6862+ " beqz %0, 1b \n"
6863+ " addu %0, %1, %3 \n"
6864+ " .set mips0 \n"
6865+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6866+ : "Ir" (i), "m" (l->a.counter)
6867+ : "memory");
6868+ } else {
6869+ unsigned long flags;
6870+
6871+ local_irq_save(flags);
6872+ result = l->a.counter;
6873+ result += i;
6874+ l->a.counter = result;
6875+ local_irq_restore(flags);
6876+ }
6877+
6878+ return result;
6879+}
6880+
6881 static __inline__ long local_sub_return(long i, local_t * l)
6882 {
6883 unsigned long result;
6884@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6885
6886 #define local_cmpxchg(l, o, n) \
6887 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6888+#define local_cmpxchg_unchecked(l, o, n) \
6889+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6890 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6891
6892 /**
6893diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6894index 3be8180..c4798d5 100644
6895--- a/arch/mips/include/asm/page.h
6896+++ b/arch/mips/include/asm/page.h
6897@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6898 #ifdef CONFIG_CPU_MIPS32
6899 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6900 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6901- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6902+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6903 #else
6904 typedef struct { unsigned long long pte; } pte_t;
6905 #define pte_val(x) ((x).pte)
6906diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6907index b336037..5b874cc 100644
6908--- a/arch/mips/include/asm/pgalloc.h
6909+++ b/arch/mips/include/asm/pgalloc.h
6910@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6911 {
6912 set_pud(pud, __pud((unsigned long)pmd));
6913 }
6914+
6915+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6916+{
6917+ pud_populate(mm, pud, pmd);
6918+}
6919 #endif
6920
6921 /*
6922diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6923index df49a30..c0d3dd6 100644
6924--- a/arch/mips/include/asm/pgtable.h
6925+++ b/arch/mips/include/asm/pgtable.h
6926@@ -20,6 +20,9 @@
6927 #include <asm/io.h>
6928 #include <asm/pgtable-bits.h>
6929
6930+#define ktla_ktva(addr) (addr)
6931+#define ktva_ktla(addr) (addr)
6932+
6933 struct mm_struct;
6934 struct vm_area_struct;
6935
6936diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6937index 7de8658..c109224 100644
6938--- a/arch/mips/include/asm/thread_info.h
6939+++ b/arch/mips/include/asm/thread_info.h
6940@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6941 #define TIF_SECCOMP 4 /* secure computing */
6942 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6943 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6944+/* li takes a 32bit immediate */
6945+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6946+
6947 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6948 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6949 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6950@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
6951 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6952 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6953 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6954+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6955
6956 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6957 _TIF_SYSCALL_AUDIT | \
6958- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6959+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6960+ _TIF_GRSEC_SETXID)
6961
6962 /* work to do in syscall_trace_leave() */
6963 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6964- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6965+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6966
6967 /* work to do on interrupt/exception return */
6968 #define _TIF_WORK_MASK \
6969@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
6970 /* work to do on any return to u-space */
6971 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6972 _TIF_WORK_SYSCALL_EXIT | \
6973- _TIF_SYSCALL_TRACEPOINT)
6974+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6975
6976 /*
6977 * We stash processor id into a COP0 register to retrieve it fast
6978diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6979index a109510..94ee3f6 100644
6980--- a/arch/mips/include/asm/uaccess.h
6981+++ b/arch/mips/include/asm/uaccess.h
6982@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6983 __ok == 0; \
6984 })
6985
6986+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6987 #define access_ok(type, addr, size) \
6988 likely(__access_ok((addr), (size), __access_mask))
6989
6990diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6991index 1188e00..41cf144 100644
6992--- a/arch/mips/kernel/binfmt_elfn32.c
6993+++ b/arch/mips/kernel/binfmt_elfn32.c
6994@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6995 #undef ELF_ET_DYN_BASE
6996 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6997
6998+#ifdef CONFIG_PAX_ASLR
6999+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7000+
7001+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7002+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7003+#endif
7004+
7005 #include <asm/processor.h>
7006 #include <linux/module.h>
7007 #include <linux/elfcore.h>
7008diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7009index 9287678..f870e47 100644
7010--- a/arch/mips/kernel/binfmt_elfo32.c
7011+++ b/arch/mips/kernel/binfmt_elfo32.c
7012@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7013 #undef ELF_ET_DYN_BASE
7014 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7015
7016+#ifdef CONFIG_PAX_ASLR
7017+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7018+
7019+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7020+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7021+#endif
7022+
7023 #include <asm/processor.h>
7024
7025 #include <linux/module.h>
7026diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7027index 50b3648..c2f3cec 100644
7028--- a/arch/mips/kernel/i8259.c
7029+++ b/arch/mips/kernel/i8259.c
7030@@ -201,7 +201,7 @@ spurious_8259A_irq:
7031 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7032 spurious_irq_mask |= irqmask;
7033 }
7034- atomic_inc(&irq_err_count);
7035+ atomic_inc_unchecked(&irq_err_count);
7036 /*
7037 * Theoretically we do not have to handle this IRQ,
7038 * but in Linux this does not cause problems and is
7039diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7040index 44a1f79..2bd6aa3 100644
7041--- a/arch/mips/kernel/irq-gt641xx.c
7042+++ b/arch/mips/kernel/irq-gt641xx.c
7043@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7044 }
7045 }
7046
7047- atomic_inc(&irq_err_count);
7048+ atomic_inc_unchecked(&irq_err_count);
7049 }
7050
7051 void __init gt641xx_irq_init(void)
7052diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7053index d2bfbc2..a8eacd2 100644
7054--- a/arch/mips/kernel/irq.c
7055+++ b/arch/mips/kernel/irq.c
7056@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7057 printk("unexpected IRQ # %d\n", irq);
7058 }
7059
7060-atomic_t irq_err_count;
7061+atomic_unchecked_t irq_err_count;
7062
7063 int arch_show_interrupts(struct seq_file *p, int prec)
7064 {
7065- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7066+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7067 return 0;
7068 }
7069
7070 asmlinkage void spurious_interrupt(void)
7071 {
7072- atomic_inc(&irq_err_count);
7073+ atomic_inc_unchecked(&irq_err_count);
7074 }
7075
7076 void __init init_IRQ(void)
7077@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7078 #endif
7079 }
7080
7081+
7082 #ifdef DEBUG_STACKOVERFLOW
7083+extern void gr_handle_kernel_exploit(void);
7084+
7085 static inline void check_stack_overflow(void)
7086 {
7087 unsigned long sp;
7088@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7089 printk("do_IRQ: stack overflow: %ld\n",
7090 sp - sizeof(struct thread_info));
7091 dump_stack();
7092+ gr_handle_kernel_exploit();
7093 }
7094 }
7095 #else
7096diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7097index 0614717..002fa43 100644
7098--- a/arch/mips/kernel/pm-cps.c
7099+++ b/arch/mips/kernel/pm-cps.c
7100@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7101 nc_core_ready_count = nc_addr;
7102
7103 /* Ensure ready_count is zero-initialised before the assembly runs */
7104- ACCESS_ONCE(*nc_core_ready_count) = 0;
7105+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7106 coupled_barrier(&per_cpu(pm_barrier, core), online);
7107
7108 /* Run the generated entry code */
7109diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7110index 636b074..8fbb91f 100644
7111--- a/arch/mips/kernel/process.c
7112+++ b/arch/mips/kernel/process.c
7113@@ -520,15 +520,3 @@ unsigned long get_wchan(struct task_struct *task)
7114 out:
7115 return pc;
7116 }
7117-
7118-/*
7119- * Don't forget that the stack pointer must be aligned on a 8 bytes
7120- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7121- */
7122-unsigned long arch_align_stack(unsigned long sp)
7123-{
7124- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7125- sp -= get_random_int() & ~PAGE_MASK;
7126-
7127- return sp & ALMASK;
7128-}
7129diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7130index 645b3c4..909c75a 100644
7131--- a/arch/mips/kernel/ptrace.c
7132+++ b/arch/mips/kernel/ptrace.c
7133@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
7134 return ret;
7135 }
7136
7137+#ifdef CONFIG_GRKERNSEC_SETXID
7138+extern void gr_delayed_cred_worker(void);
7139+#endif
7140+
7141 /*
7142 * Notification of system call entry/exit
7143 * - triggered by current->work.syscall_trace
7144@@ -777,6 +781,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7145 tracehook_report_syscall_entry(regs))
7146 ret = -1;
7147
7148+#ifdef CONFIG_GRKERNSEC_SETXID
7149+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7150+ gr_delayed_cred_worker();
7151+#endif
7152+
7153 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7154 trace_sys_enter(regs, regs->regs[2]);
7155
7156diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7157index 07fc524..b9d7f28 100644
7158--- a/arch/mips/kernel/reset.c
7159+++ b/arch/mips/kernel/reset.c
7160@@ -13,6 +13,7 @@
7161 #include <linux/reboot.h>
7162
7163 #include <asm/reboot.h>
7164+#include <asm/bug.h>
7165
7166 /*
7167 * Urgs ... Too many MIPS machines to handle this in a generic way.
7168@@ -29,16 +30,19 @@ void machine_restart(char *command)
7169 {
7170 if (_machine_restart)
7171 _machine_restart(command);
7172+ BUG();
7173 }
7174
7175 void machine_halt(void)
7176 {
7177 if (_machine_halt)
7178 _machine_halt();
7179+ BUG();
7180 }
7181
7182 void machine_power_off(void)
7183 {
7184 if (pm_power_off)
7185 pm_power_off();
7186+ BUG();
7187 }
7188diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7189index 2242bdd..b284048 100644
7190--- a/arch/mips/kernel/sync-r4k.c
7191+++ b/arch/mips/kernel/sync-r4k.c
7192@@ -18,8 +18,8 @@
7193 #include <asm/mipsregs.h>
7194
7195 static atomic_t count_start_flag = ATOMIC_INIT(0);
7196-static atomic_t count_count_start = ATOMIC_INIT(0);
7197-static atomic_t count_count_stop = ATOMIC_INIT(0);
7198+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7199+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7200 static atomic_t count_reference = ATOMIC_INIT(0);
7201
7202 #define COUNTON 100
7203@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7204
7205 for (i = 0; i < NR_LOOPS; i++) {
7206 /* slaves loop on '!= 2' */
7207- while (atomic_read(&count_count_start) != 1)
7208+ while (atomic_read_unchecked(&count_count_start) != 1)
7209 mb();
7210- atomic_set(&count_count_stop, 0);
7211+ atomic_set_unchecked(&count_count_stop, 0);
7212 smp_wmb();
7213
7214 /* this lets the slaves write their count register */
7215- atomic_inc(&count_count_start);
7216+ atomic_inc_unchecked(&count_count_start);
7217
7218 /*
7219 * Everyone initialises count in the last loop:
7220@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7221 /*
7222 * Wait for all slaves to leave the synchronization point:
7223 */
7224- while (atomic_read(&count_count_stop) != 1)
7225+ while (atomic_read_unchecked(&count_count_stop) != 1)
7226 mb();
7227- atomic_set(&count_count_start, 0);
7228+ atomic_set_unchecked(&count_count_start, 0);
7229 smp_wmb();
7230- atomic_inc(&count_count_stop);
7231+ atomic_inc_unchecked(&count_count_stop);
7232 }
7233 /* Arrange for an interrupt in a short while */
7234 write_c0_compare(read_c0_count() + COUNTON);
7235@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7236 initcount = atomic_read(&count_reference);
7237
7238 for (i = 0; i < NR_LOOPS; i++) {
7239- atomic_inc(&count_count_start);
7240- while (atomic_read(&count_count_start) != 2)
7241+ atomic_inc_unchecked(&count_count_start);
7242+ while (atomic_read_unchecked(&count_count_start) != 2)
7243 mb();
7244
7245 /*
7246@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7247 if (i == NR_LOOPS-1)
7248 write_c0_count(initcount);
7249
7250- atomic_inc(&count_count_stop);
7251- while (atomic_read(&count_count_stop) != 2)
7252+ atomic_inc_unchecked(&count_count_stop);
7253+ while (atomic_read_unchecked(&count_count_stop) != 2)
7254 mb();
7255 }
7256 /* Arrange for an interrupt in a short while */
7257diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7258index 22b19c2..c5cc8c4 100644
7259--- a/arch/mips/kernel/traps.c
7260+++ b/arch/mips/kernel/traps.c
7261@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7262 siginfo_t info;
7263
7264 prev_state = exception_enter();
7265- die_if_kernel("Integer overflow", regs);
7266+ if (unlikely(!user_mode(regs))) {
7267+
7268+#ifdef CONFIG_PAX_REFCOUNT
7269+ if (fixup_exception(regs)) {
7270+ pax_report_refcount_overflow(regs);
7271+ exception_exit(prev_state);
7272+ return;
7273+ }
7274+#endif
7275+
7276+ die("Integer overflow", regs);
7277+ }
7278
7279 info.si_code = FPE_INTOVF;
7280 info.si_signo = SIGFPE;
7281diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7282index cd71141..e02c4df 100644
7283--- a/arch/mips/kvm/mips.c
7284+++ b/arch/mips/kvm/mips.c
7285@@ -839,7 +839,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7286 return r;
7287 }
7288
7289-int kvm_arch_init(void *opaque)
7290+int kvm_arch_init(const void *opaque)
7291 {
7292 if (kvm_mips_callbacks) {
7293 kvm_err("kvm: module already exists\n");
7294diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7295index becc42b..9e43d4b 100644
7296--- a/arch/mips/mm/fault.c
7297+++ b/arch/mips/mm/fault.c
7298@@ -28,6 +28,23 @@
7299 #include <asm/highmem.h> /* For VMALLOC_END */
7300 #include <linux/kdebug.h>
7301
7302+#ifdef CONFIG_PAX_PAGEEXEC
7303+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7304+{
7305+ unsigned long i;
7306+
7307+ printk(KERN_ERR "PAX: bytes at PC: ");
7308+ for (i = 0; i < 5; i++) {
7309+ unsigned int c;
7310+ if (get_user(c, (unsigned int *)pc+i))
7311+ printk(KERN_CONT "???????? ");
7312+ else
7313+ printk(KERN_CONT "%08x ", c);
7314+ }
7315+ printk("\n");
7316+}
7317+#endif
7318+
7319 /*
7320 * This routine handles page faults. It determines the address,
7321 * and the problem, and then passes it off to one of the appropriate
7322@@ -199,6 +216,14 @@ bad_area:
7323 bad_area_nosemaphore:
7324 /* User mode accesses just cause a SIGSEGV */
7325 if (user_mode(regs)) {
7326+
7327+#ifdef CONFIG_PAX_PAGEEXEC
7328+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7329+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7330+ do_group_exit(SIGKILL);
7331+ }
7332+#endif
7333+
7334 tsk->thread.cp0_badvaddr = address;
7335 tsk->thread.error_code = write;
7336 #if 0
7337diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7338index f1baadd..5472dca 100644
7339--- a/arch/mips/mm/mmap.c
7340+++ b/arch/mips/mm/mmap.c
7341@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7342 struct vm_area_struct *vma;
7343 unsigned long addr = addr0;
7344 int do_color_align;
7345+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7346 struct vm_unmapped_area_info info;
7347
7348 if (unlikely(len > TASK_SIZE))
7349@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7350 do_color_align = 1;
7351
7352 /* requesting a specific address */
7353+
7354+#ifdef CONFIG_PAX_RANDMMAP
7355+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7356+#endif
7357+
7358 if (addr) {
7359 if (do_color_align)
7360 addr = COLOUR_ALIGN(addr, pgoff);
7361@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7362 addr = PAGE_ALIGN(addr);
7363
7364 vma = find_vma(mm, addr);
7365- if (TASK_SIZE - len >= addr &&
7366- (!vma || addr + len <= vma->vm_start))
7367+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7368 return addr;
7369 }
7370
7371 info.length = len;
7372 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7373 info.align_offset = pgoff << PAGE_SHIFT;
7374+ info.threadstack_offset = offset;
7375
7376 if (dir == DOWN) {
7377 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7378@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7379 {
7380 unsigned long random_factor = 0UL;
7381
7382+#ifdef CONFIG_PAX_RANDMMAP
7383+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7384+#endif
7385+
7386 if (current->flags & PF_RANDOMIZE) {
7387 random_factor = get_random_int();
7388 random_factor = random_factor << PAGE_SHIFT;
7389@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7390
7391 if (mmap_is_legacy()) {
7392 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7393+
7394+#ifdef CONFIG_PAX_RANDMMAP
7395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7396+ mm->mmap_base += mm->delta_mmap;
7397+#endif
7398+
7399 mm->get_unmapped_area = arch_get_unmapped_area;
7400 } else {
7401 mm->mmap_base = mmap_base(random_factor);
7402+
7403+#ifdef CONFIG_PAX_RANDMMAP
7404+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7405+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7406+#endif
7407+
7408 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7409 }
7410 }
7411
7412-static inline unsigned long brk_rnd(void)
7413-{
7414- unsigned long rnd = get_random_int();
7415-
7416- rnd = rnd << PAGE_SHIFT;
7417- /* 8MB for 32bit, 256MB for 64bit */
7418- if (TASK_IS_32BIT_ADDR)
7419- rnd = rnd & 0x7ffffful;
7420- else
7421- rnd = rnd & 0xffffffful;
7422-
7423- return rnd;
7424-}
7425-
7426-unsigned long arch_randomize_brk(struct mm_struct *mm)
7427-{
7428- unsigned long base = mm->brk;
7429- unsigned long ret;
7430-
7431- ret = PAGE_ALIGN(base + brk_rnd());
7432-
7433- if (ret < mm->brk)
7434- return mm->brk;
7435-
7436- return ret;
7437-}
7438-
7439 int __virt_addr_valid(const volatile void *kaddr)
7440 {
7441 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7442diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
7443index 9f7ecbd..6e370fc 100644
7444--- a/arch/mips/net/bpf_jit.c
7445+++ b/arch/mips/net/bpf_jit.c
7446@@ -1428,5 +1428,6 @@ void bpf_jit_free(struct bpf_prog *fp)
7447 {
7448 if (fp->jited)
7449 module_free(NULL, fp->bpf_func);
7450- kfree(fp);
7451+
7452+ bpf_prog_unlock_free(fp);
7453 }
7454diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7455index 59cccd9..f39ac2f 100644
7456--- a/arch/mips/pci/pci-octeon.c
7457+++ b/arch/mips/pci/pci-octeon.c
7458@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7459
7460
7461 static struct pci_ops octeon_pci_ops = {
7462- octeon_read_config,
7463- octeon_write_config,
7464+ .read = octeon_read_config,
7465+ .write = octeon_write_config,
7466 };
7467
7468 static struct resource octeon_pci_mem_resource = {
7469diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7470index 5e36c33..eb4a17b 100644
7471--- a/arch/mips/pci/pcie-octeon.c
7472+++ b/arch/mips/pci/pcie-octeon.c
7473@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7474 }
7475
7476 static struct pci_ops octeon_pcie0_ops = {
7477- octeon_pcie0_read_config,
7478- octeon_pcie0_write_config,
7479+ .read = octeon_pcie0_read_config,
7480+ .write = octeon_pcie0_write_config,
7481 };
7482
7483 static struct resource octeon_pcie0_mem_resource = {
7484@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7485 };
7486
7487 static struct pci_ops octeon_pcie1_ops = {
7488- octeon_pcie1_read_config,
7489- octeon_pcie1_write_config,
7490+ .read = octeon_pcie1_read_config,
7491+ .write = octeon_pcie1_write_config,
7492 };
7493
7494 static struct resource octeon_pcie1_mem_resource = {
7495@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7496 };
7497
7498 static struct pci_ops octeon_dummy_ops = {
7499- octeon_dummy_read_config,
7500- octeon_dummy_write_config,
7501+ .read = octeon_dummy_read_config,
7502+ .write = octeon_dummy_write_config,
7503 };
7504
7505 static struct resource octeon_dummy_mem_resource = {
7506diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7507index a2358b4..7cead4f 100644
7508--- a/arch/mips/sgi-ip27/ip27-nmi.c
7509+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7510@@ -187,9 +187,9 @@ void
7511 cont_nmi_dump(void)
7512 {
7513 #ifndef REAL_NMI_SIGNAL
7514- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7515+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7516
7517- atomic_inc(&nmied_cpus);
7518+ atomic_inc_unchecked(&nmied_cpus);
7519 #endif
7520 /*
7521 * Only allow 1 cpu to proceed
7522@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7523 udelay(10000);
7524 }
7525 #else
7526- while (atomic_read(&nmied_cpus) != num_online_cpus());
7527+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7528 #endif
7529
7530 /*
7531diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7532index a046b30..6799527 100644
7533--- a/arch/mips/sni/rm200.c
7534+++ b/arch/mips/sni/rm200.c
7535@@ -270,7 +270,7 @@ spurious_8259A_irq:
7536 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7537 spurious_irq_mask |= irqmask;
7538 }
7539- atomic_inc(&irq_err_count);
7540+ atomic_inc_unchecked(&irq_err_count);
7541 /*
7542 * Theoretically we do not have to handle this IRQ,
7543 * but in Linux this does not cause problems and is
7544diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7545index 41e873b..34d33a7 100644
7546--- a/arch/mips/vr41xx/common/icu.c
7547+++ b/arch/mips/vr41xx/common/icu.c
7548@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7549
7550 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7551
7552- atomic_inc(&irq_err_count);
7553+ atomic_inc_unchecked(&irq_err_count);
7554
7555 return -1;
7556 }
7557diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7558index ae0e4ee..e8f0692 100644
7559--- a/arch/mips/vr41xx/common/irq.c
7560+++ b/arch/mips/vr41xx/common/irq.c
7561@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7562 irq_cascade_t *cascade;
7563
7564 if (irq >= NR_IRQS) {
7565- atomic_inc(&irq_err_count);
7566+ atomic_inc_unchecked(&irq_err_count);
7567 return;
7568 }
7569
7570@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7571 ret = cascade->get_irq(irq);
7572 irq = ret;
7573 if (ret < 0)
7574- atomic_inc(&irq_err_count);
7575+ atomic_inc_unchecked(&irq_err_count);
7576 else
7577 irq_dispatch(irq);
7578 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7579diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7580index 967d144..db12197 100644
7581--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7582+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7583@@ -11,12 +11,14 @@
7584 #ifndef _ASM_PROC_CACHE_H
7585 #define _ASM_PROC_CACHE_H
7586
7587+#include <linux/const.h>
7588+
7589 /* L1 cache */
7590
7591 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7592 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7593-#define L1_CACHE_BYTES 16 /* bytes per entry */
7594 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7595+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7596 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7597
7598 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7599diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7600index bcb5df2..84fabd2 100644
7601--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7602+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7603@@ -16,13 +16,15 @@
7604 #ifndef _ASM_PROC_CACHE_H
7605 #define _ASM_PROC_CACHE_H
7606
7607+#include <linux/const.h>
7608+
7609 /*
7610 * L1 cache
7611 */
7612 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7613 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7614-#define L1_CACHE_BYTES 32 /* bytes per entry */
7615 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7616+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7617 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7618
7619 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7620diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7621index 4ce7a01..449202a 100644
7622--- a/arch/openrisc/include/asm/cache.h
7623+++ b/arch/openrisc/include/asm/cache.h
7624@@ -19,11 +19,13 @@
7625 #ifndef __ASM_OPENRISC_CACHE_H
7626 #define __ASM_OPENRISC_CACHE_H
7627
7628+#include <linux/const.h>
7629+
7630 /* FIXME: How can we replace these with values from the CPU...
7631 * they shouldn't be hard-coded!
7632 */
7633
7634-#define L1_CACHE_BYTES 16
7635 #define L1_CACHE_SHIFT 4
7636+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7637
7638 #endif /* __ASM_OPENRISC_CACHE_H */
7639diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7640index 0be2db2..1b0f26d 100644
7641--- a/arch/parisc/include/asm/atomic.h
7642+++ b/arch/parisc/include/asm/atomic.h
7643@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7644 return dec;
7645 }
7646
7647+#define atomic64_read_unchecked(v) atomic64_read(v)
7648+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7649+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7650+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7651+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7652+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7653+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7654+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7655+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7656+
7657 #endif /* !CONFIG_64BIT */
7658
7659
7660diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7661index 47f11c7..3420df2 100644
7662--- a/arch/parisc/include/asm/cache.h
7663+++ b/arch/parisc/include/asm/cache.h
7664@@ -5,6 +5,7 @@
7665 #ifndef __ARCH_PARISC_CACHE_H
7666 #define __ARCH_PARISC_CACHE_H
7667
7668+#include <linux/const.h>
7669
7670 /*
7671 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7672@@ -15,13 +16,13 @@
7673 * just ruin performance.
7674 */
7675 #ifdef CONFIG_PA20
7676-#define L1_CACHE_BYTES 64
7677 #define L1_CACHE_SHIFT 6
7678 #else
7679-#define L1_CACHE_BYTES 32
7680 #define L1_CACHE_SHIFT 5
7681 #endif
7682
7683+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7684+
7685 #ifndef __ASSEMBLY__
7686
7687 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7688diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7689index 3391d06..c23a2cc 100644
7690--- a/arch/parisc/include/asm/elf.h
7691+++ b/arch/parisc/include/asm/elf.h
7692@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7693
7694 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7695
7696+#ifdef CONFIG_PAX_ASLR
7697+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7698+
7699+#define PAX_DELTA_MMAP_LEN 16
7700+#define PAX_DELTA_STACK_LEN 16
7701+#endif
7702+
7703 /* This yields a mask that user programs can use to figure out what
7704 instruction set this CPU supports. This could be done in user space,
7705 but it's not easy, and we've already done it here. */
7706diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7707index f213f5b..0af3e8e 100644
7708--- a/arch/parisc/include/asm/pgalloc.h
7709+++ b/arch/parisc/include/asm/pgalloc.h
7710@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7711 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7712 }
7713
7714+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7715+{
7716+ pgd_populate(mm, pgd, pmd);
7717+}
7718+
7719 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7720 {
7721 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7722@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7723 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7724 #define pmd_free(mm, x) do { } while (0)
7725 #define pgd_populate(mm, pmd, pte) BUG()
7726+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7727
7728 #endif
7729
7730diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7731index 22b89d1..ce34230 100644
7732--- a/arch/parisc/include/asm/pgtable.h
7733+++ b/arch/parisc/include/asm/pgtable.h
7734@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7735 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7736 #define PAGE_COPY PAGE_EXECREAD
7737 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7738+
7739+#ifdef CONFIG_PAX_PAGEEXEC
7740+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7741+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7742+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7743+#else
7744+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7745+# define PAGE_COPY_NOEXEC PAGE_COPY
7746+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7747+#endif
7748+
7749 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7750 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7751 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7752diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7753index 4006964..fcb3cc2 100644
7754--- a/arch/parisc/include/asm/uaccess.h
7755+++ b/arch/parisc/include/asm/uaccess.h
7756@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7757 const void __user *from,
7758 unsigned long n)
7759 {
7760- int sz = __compiletime_object_size(to);
7761+ size_t sz = __compiletime_object_size(to);
7762 int ret = -EFAULT;
7763
7764- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7765+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7766 ret = __copy_from_user(to, from, n);
7767 else
7768 copy_from_user_overflow();
7769diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7770index 50dfafc..b9fc230 100644
7771--- a/arch/parisc/kernel/module.c
7772+++ b/arch/parisc/kernel/module.c
7773@@ -98,16 +98,38 @@
7774
7775 /* three functions to determine where in the module core
7776 * or init pieces the location is */
7777+static inline int in_init_rx(struct module *me, void *loc)
7778+{
7779+ return (loc >= me->module_init_rx &&
7780+ loc < (me->module_init_rx + me->init_size_rx));
7781+}
7782+
7783+static inline int in_init_rw(struct module *me, void *loc)
7784+{
7785+ return (loc >= me->module_init_rw &&
7786+ loc < (me->module_init_rw + me->init_size_rw));
7787+}
7788+
7789 static inline int in_init(struct module *me, void *loc)
7790 {
7791- return (loc >= me->module_init &&
7792- loc <= (me->module_init + me->init_size));
7793+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7794+}
7795+
7796+static inline int in_core_rx(struct module *me, void *loc)
7797+{
7798+ return (loc >= me->module_core_rx &&
7799+ loc < (me->module_core_rx + me->core_size_rx));
7800+}
7801+
7802+static inline int in_core_rw(struct module *me, void *loc)
7803+{
7804+ return (loc >= me->module_core_rw &&
7805+ loc < (me->module_core_rw + me->core_size_rw));
7806 }
7807
7808 static inline int in_core(struct module *me, void *loc)
7809 {
7810- return (loc >= me->module_core &&
7811- loc <= (me->module_core + me->core_size));
7812+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7813 }
7814
7815 static inline int in_local(struct module *me, void *loc)
7816@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7817 }
7818
7819 /* align things a bit */
7820- me->core_size = ALIGN(me->core_size, 16);
7821- me->arch.got_offset = me->core_size;
7822- me->core_size += gots * sizeof(struct got_entry);
7823+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7824+ me->arch.got_offset = me->core_size_rw;
7825+ me->core_size_rw += gots * sizeof(struct got_entry);
7826
7827- me->core_size = ALIGN(me->core_size, 16);
7828- me->arch.fdesc_offset = me->core_size;
7829- me->core_size += fdescs * sizeof(Elf_Fdesc);
7830+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7831+ me->arch.fdesc_offset = me->core_size_rw;
7832+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7833
7834 me->arch.got_max = gots;
7835 me->arch.fdesc_max = fdescs;
7836@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7837
7838 BUG_ON(value == 0);
7839
7840- got = me->module_core + me->arch.got_offset;
7841+ got = me->module_core_rw + me->arch.got_offset;
7842 for (i = 0; got[i].addr; i++)
7843 if (got[i].addr == value)
7844 goto out;
7845@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7846 #ifdef CONFIG_64BIT
7847 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7848 {
7849- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7850+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7851
7852 if (!value) {
7853 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7854@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7855
7856 /* Create new one */
7857 fdesc->addr = value;
7858- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7859+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7860 return (Elf_Addr)fdesc;
7861 }
7862 #endif /* CONFIG_64BIT */
7863@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7864
7865 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7866 end = table + sechdrs[me->arch.unwind_section].sh_size;
7867- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7868+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7869
7870 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7871 me->arch.unwind_section, table, end, gp);
7872diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7873index e1ffea2..46ed66e 100644
7874--- a/arch/parisc/kernel/sys_parisc.c
7875+++ b/arch/parisc/kernel/sys_parisc.c
7876@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7877 unsigned long task_size = TASK_SIZE;
7878 int do_color_align, last_mmap;
7879 struct vm_unmapped_area_info info;
7880+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7881
7882 if (len > task_size)
7883 return -ENOMEM;
7884@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7885 goto found_addr;
7886 }
7887
7888+#ifdef CONFIG_PAX_RANDMMAP
7889+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7890+#endif
7891+
7892 if (addr) {
7893 if (do_color_align && last_mmap)
7894 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7895@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7896 info.high_limit = mmap_upper_limit();
7897 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7898 info.align_offset = shared_align_offset(last_mmap, pgoff);
7899+ info.threadstack_offset = offset;
7900 addr = vm_unmapped_area(&info);
7901
7902 found_addr:
7903@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7904 unsigned long addr = addr0;
7905 int do_color_align, last_mmap;
7906 struct vm_unmapped_area_info info;
7907+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7908
7909 #ifdef CONFIG_64BIT
7910 /* This should only ever run for 32-bit processes. */
7911@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7912 }
7913
7914 /* requesting a specific address */
7915+#ifdef CONFIG_PAX_RANDMMAP
7916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7917+#endif
7918+
7919 if (addr) {
7920 if (do_color_align && last_mmap)
7921 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7922@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7923 info.high_limit = mm->mmap_base;
7924 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7925 info.align_offset = shared_align_offset(last_mmap, pgoff);
7926+ info.threadstack_offset = offset;
7927 addr = vm_unmapped_area(&info);
7928 if (!(addr & ~PAGE_MASK))
7929 goto found_addr;
7930@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7931 mm->mmap_legacy_base = mmap_legacy_base();
7932 mm->mmap_base = mmap_upper_limit();
7933
7934+#ifdef CONFIG_PAX_RANDMMAP
7935+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7936+ mm->mmap_legacy_base += mm->delta_mmap;
7937+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7938+ }
7939+#endif
7940+
7941 if (mmap_is_legacy()) {
7942 mm->mmap_base = mm->mmap_legacy_base;
7943 mm->get_unmapped_area = arch_get_unmapped_area;
7944diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7945index 47ee620..1107387 100644
7946--- a/arch/parisc/kernel/traps.c
7947+++ b/arch/parisc/kernel/traps.c
7948@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7949
7950 down_read(&current->mm->mmap_sem);
7951 vma = find_vma(current->mm,regs->iaoq[0]);
7952- if (vma && (regs->iaoq[0] >= vma->vm_start)
7953- && (vma->vm_flags & VM_EXEC)) {
7954-
7955+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7956 fault_address = regs->iaoq[0];
7957 fault_space = regs->iasq[0];
7958
7959diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7960index 3ca9c11..d163ef7 100644
7961--- a/arch/parisc/mm/fault.c
7962+++ b/arch/parisc/mm/fault.c
7963@@ -15,6 +15,7 @@
7964 #include <linux/sched.h>
7965 #include <linux/interrupt.h>
7966 #include <linux/module.h>
7967+#include <linux/unistd.h>
7968
7969 #include <asm/uaccess.h>
7970 #include <asm/traps.h>
7971@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7972 static unsigned long
7973 parisc_acctyp(unsigned long code, unsigned int inst)
7974 {
7975- if (code == 6 || code == 16)
7976+ if (code == 6 || code == 7 || code == 16)
7977 return VM_EXEC;
7978
7979 switch (inst & 0xf0000000) {
7980@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7981 }
7982 #endif
7983
7984+#ifdef CONFIG_PAX_PAGEEXEC
7985+/*
7986+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7987+ *
7988+ * returns 1 when task should be killed
7989+ * 2 when rt_sigreturn trampoline was detected
7990+ * 3 when unpatched PLT trampoline was detected
7991+ */
7992+static int pax_handle_fetch_fault(struct pt_regs *regs)
7993+{
7994+
7995+#ifdef CONFIG_PAX_EMUPLT
7996+ int err;
7997+
7998+ do { /* PaX: unpatched PLT emulation */
7999+ unsigned int bl, depwi;
8000+
8001+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8002+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8003+
8004+ if (err)
8005+ break;
8006+
8007+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8008+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8009+
8010+ err = get_user(ldw, (unsigned int *)addr);
8011+ err |= get_user(bv, (unsigned int *)(addr+4));
8012+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8013+
8014+ if (err)
8015+ break;
8016+
8017+ if (ldw == 0x0E801096U &&
8018+ bv == 0xEAC0C000U &&
8019+ ldw2 == 0x0E881095U)
8020+ {
8021+ unsigned int resolver, map;
8022+
8023+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8024+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8025+ if (err)
8026+ break;
8027+
8028+ regs->gr[20] = instruction_pointer(regs)+8;
8029+ regs->gr[21] = map;
8030+ regs->gr[22] = resolver;
8031+ regs->iaoq[0] = resolver | 3UL;
8032+ regs->iaoq[1] = regs->iaoq[0] + 4;
8033+ return 3;
8034+ }
8035+ }
8036+ } while (0);
8037+#endif
8038+
8039+#ifdef CONFIG_PAX_EMUTRAMP
8040+
8041+#ifndef CONFIG_PAX_EMUSIGRT
8042+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8043+ return 1;
8044+#endif
8045+
8046+ do { /* PaX: rt_sigreturn emulation */
8047+ unsigned int ldi1, ldi2, bel, nop;
8048+
8049+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8050+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8051+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8052+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8053+
8054+ if (err)
8055+ break;
8056+
8057+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8058+ ldi2 == 0x3414015AU &&
8059+ bel == 0xE4008200U &&
8060+ nop == 0x08000240U)
8061+ {
8062+ regs->gr[25] = (ldi1 & 2) >> 1;
8063+ regs->gr[20] = __NR_rt_sigreturn;
8064+ regs->gr[31] = regs->iaoq[1] + 16;
8065+ regs->sr[0] = regs->iasq[1];
8066+ regs->iaoq[0] = 0x100UL;
8067+ regs->iaoq[1] = regs->iaoq[0] + 4;
8068+ regs->iasq[0] = regs->sr[2];
8069+ regs->iasq[1] = regs->sr[2];
8070+ return 2;
8071+ }
8072+ } while (0);
8073+#endif
8074+
8075+ return 1;
8076+}
8077+
8078+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8079+{
8080+ unsigned long i;
8081+
8082+ printk(KERN_ERR "PAX: bytes at PC: ");
8083+ for (i = 0; i < 5; i++) {
8084+ unsigned int c;
8085+ if (get_user(c, (unsigned int *)pc+i))
8086+ printk(KERN_CONT "???????? ");
8087+ else
8088+ printk(KERN_CONT "%08x ", c);
8089+ }
8090+ printk("\n");
8091+}
8092+#endif
8093+
8094 int fixup_exception(struct pt_regs *regs)
8095 {
8096 const struct exception_table_entry *fix;
8097@@ -234,8 +345,33 @@ retry:
8098
8099 good_area:
8100
8101- if ((vma->vm_flags & acc_type) != acc_type)
8102+ if ((vma->vm_flags & acc_type) != acc_type) {
8103+
8104+#ifdef CONFIG_PAX_PAGEEXEC
8105+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8106+ (address & ~3UL) == instruction_pointer(regs))
8107+ {
8108+ up_read(&mm->mmap_sem);
8109+ switch (pax_handle_fetch_fault(regs)) {
8110+
8111+#ifdef CONFIG_PAX_EMUPLT
8112+ case 3:
8113+ return;
8114+#endif
8115+
8116+#ifdef CONFIG_PAX_EMUTRAMP
8117+ case 2:
8118+ return;
8119+#endif
8120+
8121+ }
8122+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8123+ do_group_exit(SIGKILL);
8124+ }
8125+#endif
8126+
8127 goto bad_area;
8128+ }
8129
8130 /*
8131 * If for any reason at all we couldn't handle the fault, make
8132diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8133index 4bc7b62..107e0b2 100644
8134--- a/arch/powerpc/Kconfig
8135+++ b/arch/powerpc/Kconfig
8136@@ -399,6 +399,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8137 config KEXEC
8138 bool "kexec system call"
8139 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8140+ depends on !GRKERNSEC_KMEM
8141 help
8142 kexec is a system call that implements the ability to shutdown your
8143 current kernel, and to start another kernel. It is like a reboot
8144diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8145index 28992d0..434c881 100644
8146--- a/arch/powerpc/include/asm/atomic.h
8147+++ b/arch/powerpc/include/asm/atomic.h
8148@@ -12,6 +12,11 @@
8149
8150 #define ATOMIC_INIT(i) { (i) }
8151
8152+#define _ASM_EXTABLE(from, to) \
8153+" .section __ex_table,\"a\"\n" \
8154+ PPC_LONG" " #from ", " #to"\n" \
8155+" .previous\n"
8156+
8157 static __inline__ int atomic_read(const atomic_t *v)
8158 {
8159 int t;
8160@@ -21,16 +26,61 @@ static __inline__ int atomic_read(const atomic_t *v)
8161 return t;
8162 }
8163
8164+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
8165+{
8166+ int t;
8167+
8168+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8169+
8170+ return t;
8171+}
8172+
8173 static __inline__ void atomic_set(atomic_t *v, int i)
8174 {
8175 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8176 }
8177
8178+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8179+{
8180+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8181+}
8182+
8183 static __inline__ void atomic_add(int a, atomic_t *v)
8184 {
8185 int t;
8186
8187 __asm__ __volatile__(
8188+"1: lwarx %0,0,%3 # atomic_add\n"
8189+
8190+#ifdef CONFIG_PAX_REFCOUNT
8191+" mcrxr cr0\n"
8192+" addo. %0,%2,%0\n"
8193+" bf 4*cr0+so, 3f\n"
8194+"2:.long " "0x00c00b00""\n"
8195+#else
8196+" add %0,%2,%0\n"
8197+#endif
8198+
8199+"3:\n"
8200+ PPC405_ERR77(0,%3)
8201+" stwcx. %0,0,%3 \n\
8202+ bne- 1b"
8203+
8204+#ifdef CONFIG_PAX_REFCOUNT
8205+"\n4:\n"
8206+ _ASM_EXTABLE(2b, 4b)
8207+#endif
8208+
8209+ : "=&r" (t), "+m" (v->counter)
8210+ : "r" (a), "r" (&v->counter)
8211+ : "cc");
8212+}
8213+
8214+static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
8215+{
8216+ int t;
8217+
8218+ __asm__ __volatile__(
8219 "1: lwarx %0,0,%3 # atomic_add\n\
8220 add %0,%2,%0\n"
8221 PPC405_ERR77(0,%3)
8222@@ -41,12 +91,49 @@ static __inline__ void atomic_add(int a, atomic_t *v)
8223 : "cc");
8224 }
8225
8226+/* Same as atomic_add but return the value */
8227 static __inline__ int atomic_add_return(int a, atomic_t *v)
8228 {
8229 int t;
8230
8231 __asm__ __volatile__(
8232 PPC_ATOMIC_ENTRY_BARRIER
8233+"1: lwarx %0,0,%2 # atomic_add_return\n"
8234+
8235+#ifdef CONFIG_PAX_REFCOUNT
8236+" mcrxr cr0\n"
8237+" addo. %0,%1,%0\n"
8238+" bf 4*cr0+so, 3f\n"
8239+"2:.long " "0x00c00b00""\n"
8240+#else
8241+" add %0,%1,%0\n"
8242+#endif
8243+
8244+"3:\n"
8245+ PPC405_ERR77(0,%2)
8246+" stwcx. %0,0,%2 \n\
8247+ bne- 1b\n"
8248+"4:"
8249+
8250+#ifdef CONFIG_PAX_REFCOUNT
8251+ _ASM_EXTABLE(2b, 4b)
8252+#endif
8253+
8254+ PPC_ATOMIC_EXIT_BARRIER
8255+ : "=&r" (t)
8256+ : "r" (a), "r" (&v->counter)
8257+ : "cc", "memory");
8258+
8259+ return t;
8260+}
8261+
8262+/* Same as atomic_add_unchecked but return the value */
8263+static __inline__ int atomic_add_return_unchecked(int a, atomic_unchecked_t *v)
8264+{
8265+ int t;
8266+
8267+ __asm__ __volatile__(
8268+ PPC_ATOMIC_ENTRY_BARRIER
8269 "1: lwarx %0,0,%2 # atomic_add_return\n\
8270 add %0,%1,%0\n"
8271 PPC405_ERR77(0,%2)
8272@@ -67,6 +154,37 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8273 int t;
8274
8275 __asm__ __volatile__(
8276+"1: lwarx %0,0,%3 # atomic_sub\n"
8277+
8278+#ifdef CONFIG_PAX_REFCOUNT
8279+" mcrxr cr0\n"
8280+" subfo. %0,%2,%0\n"
8281+" bf 4*cr0+so, 3f\n"
8282+"2:.long " "0x00c00b00""\n"
8283+#else
8284+" subf %0,%2,%0\n"
8285+#endif
8286+
8287+"3:\n"
8288+ PPC405_ERR77(0,%3)
8289+" stwcx. %0,0,%3 \n\
8290+ bne- 1b\n"
8291+"4:"
8292+
8293+#ifdef CONFIG_PAX_REFCOUNT
8294+ _ASM_EXTABLE(2b, 4b)
8295+#endif
8296+
8297+ : "=&r" (t), "+m" (v->counter)
8298+ : "r" (a), "r" (&v->counter)
8299+ : "cc");
8300+}
8301+
8302+static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
8303+{
8304+ int t;
8305+
8306+ __asm__ __volatile__(
8307 "1: lwarx %0,0,%3 # atomic_sub\n\
8308 subf %0,%2,%0\n"
8309 PPC405_ERR77(0,%3)
8310@@ -77,12 +195,49 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8311 : "cc");
8312 }
8313
8314+/* Same as atomic_sub but return the value */
8315 static __inline__ int atomic_sub_return(int a, atomic_t *v)
8316 {
8317 int t;
8318
8319 __asm__ __volatile__(
8320 PPC_ATOMIC_ENTRY_BARRIER
8321+"1: lwarx %0,0,%2 # atomic_sub_return\n"
8322+
8323+#ifdef CONFIG_PAX_REFCOUNT
8324+" mcrxr cr0\n"
8325+" subfo. %0,%1,%0\n"
8326+" bf 4*cr0+so, 3f\n"
8327+"2:.long " "0x00c00b00""\n"
8328+#else
8329+" subf %0,%1,%0\n"
8330+#endif
8331+
8332+"3:\n"
8333+ PPC405_ERR77(0,%2)
8334+" stwcx. %0,0,%2 \n\
8335+ bne- 1b\n"
8336+ PPC_ATOMIC_EXIT_BARRIER
8337+"4:"
8338+
8339+#ifdef CONFIG_PAX_REFCOUNT
8340+ _ASM_EXTABLE(2b, 4b)
8341+#endif
8342+
8343+ : "=&r" (t)
8344+ : "r" (a), "r" (&v->counter)
8345+ : "cc", "memory");
8346+
8347+ return t;
8348+}
8349+
8350+/* Same as atomic_sub_unchecked but return the value */
8351+static __inline__ int atomic_sub_return_unchecked(int a, atomic_unchecked_t *v)
8352+{
8353+ int t;
8354+
8355+ __asm__ __volatile__(
8356+ PPC_ATOMIC_ENTRY_BARRIER
8357 "1: lwarx %0,0,%2 # atomic_sub_return\n\
8358 subf %0,%1,%0\n"
8359 PPC405_ERR77(0,%2)
8360@@ -96,38 +251,23 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
8361 return t;
8362 }
8363
8364-static __inline__ void atomic_inc(atomic_t *v)
8365-{
8366- int t;
8367+/*
8368+ * atomic_inc - increment atomic variable
8369+ * @v: pointer of type atomic_t
8370+ *
8371+ * Automatically increments @v by 1
8372+ */
8373+#define atomic_inc(v) atomic_add(1, (v))
8374+#define atomic_inc_return(v) atomic_add_return(1, (v))
8375
8376- __asm__ __volatile__(
8377-"1: lwarx %0,0,%2 # atomic_inc\n\
8378- addic %0,%0,1\n"
8379- PPC405_ERR77(0,%2)
8380-" stwcx. %0,0,%2 \n\
8381- bne- 1b"
8382- : "=&r" (t), "+m" (v->counter)
8383- : "r" (&v->counter)
8384- : "cc", "xer");
8385+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
8386+{
8387+ atomic_add_unchecked(1, v);
8388 }
8389
8390-static __inline__ int atomic_inc_return(atomic_t *v)
8391+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8392 {
8393- int t;
8394-
8395- __asm__ __volatile__(
8396- PPC_ATOMIC_ENTRY_BARRIER
8397-"1: lwarx %0,0,%1 # atomic_inc_return\n\
8398- addic %0,%0,1\n"
8399- PPC405_ERR77(0,%1)
8400-" stwcx. %0,0,%1 \n\
8401- bne- 1b"
8402- PPC_ATOMIC_EXIT_BARRIER
8403- : "=&r" (t)
8404- : "r" (&v->counter)
8405- : "cc", "xer", "memory");
8406-
8407- return t;
8408+ return atomic_add_return_unchecked(1, v);
8409 }
8410
8411 /*
8412@@ -140,43 +280,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
8413 */
8414 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8415
8416-static __inline__ void atomic_dec(atomic_t *v)
8417+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8418 {
8419- int t;
8420-
8421- __asm__ __volatile__(
8422-"1: lwarx %0,0,%2 # atomic_dec\n\
8423- addic %0,%0,-1\n"
8424- PPC405_ERR77(0,%2)\
8425-" stwcx. %0,0,%2\n\
8426- bne- 1b"
8427- : "=&r" (t), "+m" (v->counter)
8428- : "r" (&v->counter)
8429- : "cc", "xer");
8430+ return atomic_add_return_unchecked(1, v) == 0;
8431 }
8432
8433-static __inline__ int atomic_dec_return(atomic_t *v)
8434+/*
8435+ * atomic_dec - decrement atomic variable
8436+ * @v: pointer of type atomic_t
8437+ *
8438+ * Atomically decrements @v by 1
8439+ */
8440+#define atomic_dec(v) atomic_sub(1, (v))
8441+#define atomic_dec_return(v) atomic_sub_return(1, (v))
8442+
8443+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
8444 {
8445- int t;
8446-
8447- __asm__ __volatile__(
8448- PPC_ATOMIC_ENTRY_BARRIER
8449-"1: lwarx %0,0,%1 # atomic_dec_return\n\
8450- addic %0,%0,-1\n"
8451- PPC405_ERR77(0,%1)
8452-" stwcx. %0,0,%1\n\
8453- bne- 1b"
8454- PPC_ATOMIC_EXIT_BARRIER
8455- : "=&r" (t)
8456- : "r" (&v->counter)
8457- : "cc", "xer", "memory");
8458-
8459- return t;
8460+ atomic_sub_unchecked(1, v);
8461 }
8462
8463 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8464 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8465
8466+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8467+{
8468+ return cmpxchg(&(v->counter), old, new);
8469+}
8470+
8471+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8472+{
8473+ return xchg(&(v->counter), new);
8474+}
8475+
8476 /**
8477 * __atomic_add_unless - add unless the number is a given value
8478 * @v: pointer of type atomic_t
8479@@ -271,6 +406,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8480 }
8481 #define atomic_dec_if_positive atomic_dec_if_positive
8482
8483+#define smp_mb__before_atomic_dec() smp_mb()
8484+#define smp_mb__after_atomic_dec() smp_mb()
8485+#define smp_mb__before_atomic_inc() smp_mb()
8486+#define smp_mb__after_atomic_inc() smp_mb()
8487+
8488 #ifdef __powerpc64__
8489
8490 #define ATOMIC64_INIT(i) { (i) }
8491@@ -284,11 +424,25 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8492 return t;
8493 }
8494
8495+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8496+{
8497+ long t;
8498+
8499+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8500+
8501+ return t;
8502+}
8503+
8504 static __inline__ void atomic64_set(atomic64_t *v, long i)
8505 {
8506 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8507 }
8508
8509+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8510+{
8511+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8512+}
8513+
8514 static __inline__ void atomic64_add(long a, atomic64_t *v)
8515 {
8516 long t;
8517@@ -303,12 +457,76 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
8518 : "cc");
8519 }
8520
8521+static __inline__ void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
8522+{
8523+ long t;
8524+
8525+ __asm__ __volatile__(
8526+"1: ldarx %0,0,%3 # atomic64_add\n"
8527+
8528+#ifdef CONFIG_PAX_REFCOUNT
8529+" mcrxr cr0\n"
8530+" addo. %0,%2,%0\n"
8531+" bf 4*cr0+so, 3f\n"
8532+"2:.long " "0x00c00b00""\n"
8533+#else
8534+" add %0,%2,%0\n"
8535+#endif
8536+
8537+"3:\n"
8538+" stdcx. %0,0,%3 \n\
8539+ bne- 1b\n"
8540+"4:"
8541+
8542+#ifdef CONFIG_PAX_REFCOUNT
8543+ _ASM_EXTABLE(2b, 4b)
8544+#endif
8545+
8546+ : "=&r" (t), "+m" (v->counter)
8547+ : "r" (a), "r" (&v->counter)
8548+ : "cc");
8549+}
8550+
8551 static __inline__ long atomic64_add_return(long a, atomic64_t *v)
8552 {
8553 long t;
8554
8555 __asm__ __volatile__(
8556 PPC_ATOMIC_ENTRY_BARRIER
8557+"1: ldarx %0,0,%2 # atomic64_add_return\n"
8558+
8559+#ifdef CONFIG_PAX_REFCOUNT
8560+" mcrxr cr0\n"
8561+" addo. %0,%1,%0\n"
8562+" bf 4*cr0+so, 3f\n"
8563+"2:.long " "0x00c00b00""\n"
8564+#else
8565+" add %0,%1,%0\n"
8566+#endif
8567+
8568+"3:\n"
8569+" stdcx. %0,0,%2 \n\
8570+ bne- 1b\n"
8571+ PPC_ATOMIC_EXIT_BARRIER
8572+"4:"
8573+
8574+#ifdef CONFIG_PAX_REFCOUNT
8575+ _ASM_EXTABLE(2b, 4b)
8576+#endif
8577+
8578+ : "=&r" (t)
8579+ : "r" (a), "r" (&v->counter)
8580+ : "cc", "memory");
8581+
8582+ return t;
8583+}
8584+
8585+static __inline__ long atomic64_add_return_unchecked(long a, atomic64_unchecked_t *v)
8586+{
8587+ long t;
8588+
8589+ __asm__ __volatile__(
8590+ PPC_ATOMIC_ENTRY_BARRIER
8591 "1: ldarx %0,0,%2 # atomic64_add_return\n\
8592 add %0,%1,%0\n\
8593 stdcx. %0,0,%2 \n\
8594@@ -328,6 +546,36 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
8595 long t;
8596
8597 __asm__ __volatile__(
8598+"1: ldarx %0,0,%3 # atomic64_sub\n"
8599+
8600+#ifdef CONFIG_PAX_REFCOUNT
8601+" mcrxr cr0\n"
8602+" subfo. %0,%2,%0\n"
8603+" bf 4*cr0+so, 3f\n"
8604+"2:.long " "0x00c00b00""\n"
8605+#else
8606+" subf %0,%2,%0\n"
8607+#endif
8608+
8609+"3:\n"
8610+" stdcx. %0,0,%3 \n\
8611+ bne- 1b"
8612+"4:"
8613+
8614+#ifdef CONFIG_PAX_REFCOUNT
8615+ _ASM_EXTABLE(2b, 4b)
8616+#endif
8617+
8618+ : "=&r" (t), "+m" (v->counter)
8619+ : "r" (a), "r" (&v->counter)
8620+ : "cc");
8621+}
8622+
8623+static __inline__ void atomic64_sub_unchecked(long a, atomic64_unchecked_t *v)
8624+{
8625+ long t;
8626+
8627+ __asm__ __volatile__(
8628 "1: ldarx %0,0,%3 # atomic64_sub\n\
8629 subf %0,%2,%0\n\
8630 stdcx. %0,0,%3 \n\
8631@@ -343,6 +591,40 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8632
8633 __asm__ __volatile__(
8634 PPC_ATOMIC_ENTRY_BARRIER
8635+"1: ldarx %0,0,%2 # atomic64_sub_return\n"
8636+
8637+#ifdef CONFIG_PAX_REFCOUNT
8638+" mcrxr cr0\n"
8639+" subfo. %0,%1,%0\n"
8640+" bf 4*cr0+so, 3f\n"
8641+"2:.long " "0x00c00b00""\n"
8642+#else
8643+" subf %0,%1,%0\n"
8644+#endif
8645+
8646+"3:\n"
8647+" stdcx. %0,0,%2 \n\
8648+ bne- 1b\n"
8649+ PPC_ATOMIC_EXIT_BARRIER
8650+"4:"
8651+
8652+#ifdef CONFIG_PAX_REFCOUNT
8653+ _ASM_EXTABLE(2b, 4b)
8654+#endif
8655+
8656+ : "=&r" (t)
8657+ : "r" (a), "r" (&v->counter)
8658+ : "cc", "memory");
8659+
8660+ return t;
8661+}
8662+
8663+static __inline__ long atomic64_sub_return_unchecked(long a, atomic64_unchecked_t *v)
8664+{
8665+ long t;
8666+
8667+ __asm__ __volatile__(
8668+ PPC_ATOMIC_ENTRY_BARRIER
8669 "1: ldarx %0,0,%2 # atomic64_sub_return\n\
8670 subf %0,%1,%0\n\
8671 stdcx. %0,0,%2 \n\
8672@@ -355,36 +637,23 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8673 return t;
8674 }
8675
8676-static __inline__ void atomic64_inc(atomic64_t *v)
8677-{
8678- long t;
8679+/*
8680+ * atomic64_inc - increment atomic variable
8681+ * @v: pointer of type atomic64_t
8682+ *
8683+ * Automatically increments @v by 1
8684+ */
8685+#define atomic64_inc(v) atomic64_add(1, (v))
8686+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8687
8688- __asm__ __volatile__(
8689-"1: ldarx %0,0,%2 # atomic64_inc\n\
8690- addic %0,%0,1\n\
8691- stdcx. %0,0,%2 \n\
8692- bne- 1b"
8693- : "=&r" (t), "+m" (v->counter)
8694- : "r" (&v->counter)
8695- : "cc", "xer");
8696+static __inline__ void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8697+{
8698+ atomic64_add_unchecked(1, v);
8699 }
8700
8701-static __inline__ long atomic64_inc_return(atomic64_t *v)
8702+static __inline__ int atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8703 {
8704- long t;
8705-
8706- __asm__ __volatile__(
8707- PPC_ATOMIC_ENTRY_BARRIER
8708-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8709- addic %0,%0,1\n\
8710- stdcx. %0,0,%1 \n\
8711- bne- 1b"
8712- PPC_ATOMIC_EXIT_BARRIER
8713- : "=&r" (t)
8714- : "r" (&v->counter)
8715- : "cc", "xer", "memory");
8716-
8717- return t;
8718+ return atomic64_add_return_unchecked(1, v);
8719 }
8720
8721 /*
8722@@ -397,36 +666,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8723 */
8724 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8725
8726-static __inline__ void atomic64_dec(atomic64_t *v)
8727+/*
8728+ * atomic64_dec - decrement atomic variable
8729+ * @v: pointer of type atomic64_t
8730+ *
8731+ * Atomically decrements @v by 1
8732+ */
8733+#define atomic64_dec(v) atomic64_sub(1, (v))
8734+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8735+
8736+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8737 {
8738- long t;
8739-
8740- __asm__ __volatile__(
8741-"1: ldarx %0,0,%2 # atomic64_dec\n\
8742- addic %0,%0,-1\n\
8743- stdcx. %0,0,%2\n\
8744- bne- 1b"
8745- : "=&r" (t), "+m" (v->counter)
8746- : "r" (&v->counter)
8747- : "cc", "xer");
8748-}
8749-
8750-static __inline__ long atomic64_dec_return(atomic64_t *v)
8751-{
8752- long t;
8753-
8754- __asm__ __volatile__(
8755- PPC_ATOMIC_ENTRY_BARRIER
8756-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8757- addic %0,%0,-1\n\
8758- stdcx. %0,0,%1\n\
8759- bne- 1b"
8760- PPC_ATOMIC_EXIT_BARRIER
8761- : "=&r" (t)
8762- : "r" (&v->counter)
8763- : "cc", "xer", "memory");
8764-
8765- return t;
8766+ atomic64_sub_unchecked(1, v);
8767 }
8768
8769 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8770@@ -459,6 +710,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8771 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8772 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8773
8774+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8775+{
8776+ return cmpxchg(&(v->counter), old, new);
8777+}
8778+
8779+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8780+{
8781+ return xchg(&(v->counter), new);
8782+}
8783+
8784 /**
8785 * atomic64_add_unless - add unless the number is a given value
8786 * @v: pointer of type atomic64_t
8787diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8788index bab79a1..4a3eabc 100644
8789--- a/arch/powerpc/include/asm/barrier.h
8790+++ b/arch/powerpc/include/asm/barrier.h
8791@@ -73,7 +73,7 @@
8792 do { \
8793 compiletime_assert_atomic_type(*p); \
8794 __lwsync(); \
8795- ACCESS_ONCE(*p) = (v); \
8796+ ACCESS_ONCE_RW(*p) = (v); \
8797 } while (0)
8798
8799 #define smp_load_acquire(p) \
8800diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8801index 34a05a1..a1f2c67 100644
8802--- a/arch/powerpc/include/asm/cache.h
8803+++ b/arch/powerpc/include/asm/cache.h
8804@@ -4,6 +4,7 @@
8805 #ifdef __KERNEL__
8806
8807 #include <asm/reg.h>
8808+#include <linux/const.h>
8809
8810 /* bytes per L1 cache line */
8811 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8812@@ -23,7 +24,7 @@
8813 #define L1_CACHE_SHIFT 7
8814 #endif
8815
8816-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8817+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8818
8819 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8820
8821diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8822index 888d8f3..66f581c 100644
8823--- a/arch/powerpc/include/asm/elf.h
8824+++ b/arch/powerpc/include/asm/elf.h
8825@@ -28,8 +28,19 @@
8826 the loader. We need to make sure that it is out of the way of the program
8827 that it will "exec", and that there is sufficient room for the brk. */
8828
8829-extern unsigned long randomize_et_dyn(unsigned long base);
8830-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8831+#define ELF_ET_DYN_BASE (0x20000000)
8832+
8833+#ifdef CONFIG_PAX_ASLR
8834+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8835+
8836+#ifdef __powerpc64__
8837+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8838+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8839+#else
8840+#define PAX_DELTA_MMAP_LEN 15
8841+#define PAX_DELTA_STACK_LEN 15
8842+#endif
8843+#endif
8844
8845 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8846
8847@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8848 (0x7ff >> (PAGE_SHIFT - 12)) : \
8849 (0x3ffff >> (PAGE_SHIFT - 12)))
8850
8851-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8852-#define arch_randomize_brk arch_randomize_brk
8853-
8854-
8855 #ifdef CONFIG_SPU_BASE
8856 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8857 #define NT_SPU 1
8858diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8859index 8196e9c..d83a9f3 100644
8860--- a/arch/powerpc/include/asm/exec.h
8861+++ b/arch/powerpc/include/asm/exec.h
8862@@ -4,6 +4,6 @@
8863 #ifndef _ASM_POWERPC_EXEC_H
8864 #define _ASM_POWERPC_EXEC_H
8865
8866-extern unsigned long arch_align_stack(unsigned long sp);
8867+#define arch_align_stack(x) ((x) & ~0xfUL)
8868
8869 #endif /* _ASM_POWERPC_EXEC_H */
8870diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8871index 5acabbd..7ea14fa 100644
8872--- a/arch/powerpc/include/asm/kmap_types.h
8873+++ b/arch/powerpc/include/asm/kmap_types.h
8874@@ -10,7 +10,7 @@
8875 * 2 of the License, or (at your option) any later version.
8876 */
8877
8878-#define KM_TYPE_NR 16
8879+#define KM_TYPE_NR 17
8880
8881 #endif /* __KERNEL__ */
8882 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8883diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8884index b8da913..c02b593 100644
8885--- a/arch/powerpc/include/asm/local.h
8886+++ b/arch/powerpc/include/asm/local.h
8887@@ -9,21 +9,65 @@ typedef struct
8888 atomic_long_t a;
8889 } local_t;
8890
8891+typedef struct
8892+{
8893+ atomic_long_unchecked_t a;
8894+} local_unchecked_t;
8895+
8896 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8897
8898 #define local_read(l) atomic_long_read(&(l)->a)
8899+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8900 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8901+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8902
8903 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8904+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8905 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8906+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8907 #define local_inc(l) atomic_long_inc(&(l)->a)
8908+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8909 #define local_dec(l) atomic_long_dec(&(l)->a)
8910+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8911
8912 static __inline__ long local_add_return(long a, local_t *l)
8913 {
8914 long t;
8915
8916 __asm__ __volatile__(
8917+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8918+
8919+#ifdef CONFIG_PAX_REFCOUNT
8920+" mcrxr cr0\n"
8921+" addo. %0,%1,%0\n"
8922+" bf 4*cr0+so, 3f\n"
8923+"2:.long " "0x00c00b00""\n"
8924+#else
8925+" add %0,%1,%0\n"
8926+#endif
8927+
8928+"3:\n"
8929+ PPC405_ERR77(0,%2)
8930+ PPC_STLCX "%0,0,%2 \n\
8931+ bne- 1b"
8932+
8933+#ifdef CONFIG_PAX_REFCOUNT
8934+"\n4:\n"
8935+ _ASM_EXTABLE(2b, 4b)
8936+#endif
8937+
8938+ : "=&r" (t)
8939+ : "r" (a), "r" (&(l->a.counter))
8940+ : "cc", "memory");
8941+
8942+ return t;
8943+}
8944+
8945+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8946+{
8947+ long t;
8948+
8949+ __asm__ __volatile__(
8950 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8951 add %0,%1,%0\n"
8952 PPC405_ERR77(0,%2)
8953@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8954
8955 #define local_cmpxchg(l, o, n) \
8956 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8957+#define local_cmpxchg_unchecked(l, o, n) \
8958+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8959 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8960
8961 /**
8962diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8963index 8565c25..2865190 100644
8964--- a/arch/powerpc/include/asm/mman.h
8965+++ b/arch/powerpc/include/asm/mman.h
8966@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8967 }
8968 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8969
8970-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8971+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8972 {
8973 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8974 }
8975diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8976index 26fe1ae..987ffc5 100644
8977--- a/arch/powerpc/include/asm/page.h
8978+++ b/arch/powerpc/include/asm/page.h
8979@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8980 * and needs to be executable. This means the whole heap ends
8981 * up being executable.
8982 */
8983-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8984- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8985+#define VM_DATA_DEFAULT_FLAGS32 \
8986+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8987+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8988
8989 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8990 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8991@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8992 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8993 #endif
8994
8995+#define ktla_ktva(addr) (addr)
8996+#define ktva_ktla(addr) (addr)
8997+
8998 #ifndef CONFIG_PPC_BOOK3S_64
8999 /*
9000 * Use the top bit of the higher-level page table entries to indicate whether
9001diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
9002index 88693ce..ac6f9ab 100644
9003--- a/arch/powerpc/include/asm/page_64.h
9004+++ b/arch/powerpc/include/asm/page_64.h
9005@@ -153,15 +153,18 @@ do { \
9006 * stack by default, so in the absence of a PT_GNU_STACK program header
9007 * we turn execute permission off.
9008 */
9009-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9010- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9011+#define VM_STACK_DEFAULT_FLAGS32 \
9012+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9013+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9014
9015 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9016 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9017
9018+#ifndef CONFIG_PAX_PAGEEXEC
9019 #define VM_STACK_DEFAULT_FLAGS \
9020 (is_32bit_task() ? \
9021 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
9022+#endif
9023
9024 #include <asm-generic/getorder.h>
9025
9026diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
9027index 4b0be20..c15a27d 100644
9028--- a/arch/powerpc/include/asm/pgalloc-64.h
9029+++ b/arch/powerpc/include/asm/pgalloc-64.h
9030@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9031 #ifndef CONFIG_PPC_64K_PAGES
9032
9033 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
9034+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
9035
9036 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
9037 {
9038@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9039 pud_set(pud, (unsigned long)pmd);
9040 }
9041
9042+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9043+{
9044+ pud_populate(mm, pud, pmd);
9045+}
9046+
9047 #define pmd_populate(mm, pmd, pte_page) \
9048 pmd_populate_kernel(mm, pmd, page_address(pte_page))
9049 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
9050@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
9051 #endif
9052
9053 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
9054+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9055
9056 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
9057 pte_t *pte)
9058diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
9059index d98c1ec..9f61569 100644
9060--- a/arch/powerpc/include/asm/pgtable.h
9061+++ b/arch/powerpc/include/asm/pgtable.h
9062@@ -2,6 +2,7 @@
9063 #define _ASM_POWERPC_PGTABLE_H
9064 #ifdef __KERNEL__
9065
9066+#include <linux/const.h>
9067 #ifndef __ASSEMBLY__
9068 #include <linux/mmdebug.h>
9069 #include <asm/processor.h> /* For TASK_SIZE */
9070diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
9071index 4aad413..85d86bf 100644
9072--- a/arch/powerpc/include/asm/pte-hash32.h
9073+++ b/arch/powerpc/include/asm/pte-hash32.h
9074@@ -21,6 +21,7 @@
9075 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
9076 #define _PAGE_USER 0x004 /* usermode access allowed */
9077 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
9078+#define _PAGE_EXEC _PAGE_GUARDED
9079 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
9080 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
9081 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
9082diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
9083index 0c05059..7e056e4 100644
9084--- a/arch/powerpc/include/asm/reg.h
9085+++ b/arch/powerpc/include/asm/reg.h
9086@@ -251,6 +251,7 @@
9087 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
9088 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
9089 #define DSISR_NOHPTE 0x40000000 /* no translation found */
9090+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
9091 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
9092 #define DSISR_ISSTORE 0x02000000 /* access was a store */
9093 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
9094diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
9095index 5a6614a..d89995d1 100644
9096--- a/arch/powerpc/include/asm/smp.h
9097+++ b/arch/powerpc/include/asm/smp.h
9098@@ -51,7 +51,7 @@ struct smp_ops_t {
9099 int (*cpu_disable)(void);
9100 void (*cpu_die)(unsigned int nr);
9101 int (*cpu_bootable)(unsigned int nr);
9102-};
9103+} __no_const;
9104
9105 extern void smp_send_debugger_break(void);
9106 extern void start_secondary_resume(void);
9107diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
9108index 4dbe072..b803275 100644
9109--- a/arch/powerpc/include/asm/spinlock.h
9110+++ b/arch/powerpc/include/asm/spinlock.h
9111@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
9112 __asm__ __volatile__(
9113 "1: " PPC_LWARX(%0,0,%1,1) "\n"
9114 __DO_SIGN_EXTEND
9115-" addic. %0,%0,1\n\
9116- ble- 2f\n"
9117+
9118+#ifdef CONFIG_PAX_REFCOUNT
9119+" mcrxr cr0\n"
9120+" addico. %0,%0,1\n"
9121+" bf 4*cr0+so, 3f\n"
9122+"2:.long " "0x00c00b00""\n"
9123+#else
9124+" addic. %0,%0,1\n"
9125+#endif
9126+
9127+"3:\n"
9128+ "ble- 4f\n"
9129 PPC405_ERR77(0,%1)
9130 " stwcx. %0,0,%1\n\
9131 bne- 1b\n"
9132 PPC_ACQUIRE_BARRIER
9133-"2:" : "=&r" (tmp)
9134+"4:"
9135+
9136+#ifdef CONFIG_PAX_REFCOUNT
9137+ _ASM_EXTABLE(2b,4b)
9138+#endif
9139+
9140+ : "=&r" (tmp)
9141 : "r" (&rw->lock)
9142 : "cr0", "xer", "memory");
9143
9144@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
9145 __asm__ __volatile__(
9146 "# read_unlock\n\t"
9147 PPC_RELEASE_BARRIER
9148-"1: lwarx %0,0,%1\n\
9149- addic %0,%0,-1\n"
9150+"1: lwarx %0,0,%1\n"
9151+
9152+#ifdef CONFIG_PAX_REFCOUNT
9153+" mcrxr cr0\n"
9154+" addico. %0,%0,-1\n"
9155+" bf 4*cr0+so, 3f\n"
9156+"2:.long " "0x00c00b00""\n"
9157+#else
9158+" addic. %0,%0,-1\n"
9159+#endif
9160+
9161+"3:\n"
9162 PPC405_ERR77(0,%1)
9163 " stwcx. %0,0,%1\n\
9164 bne- 1b"
9165+
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+"\n4:\n"
9168+ _ASM_EXTABLE(2b, 4b)
9169+#endif
9170+
9171 : "=&r"(tmp)
9172 : "r"(&rw->lock)
9173 : "cr0", "xer", "memory");
9174diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
9175index b034ecd..af7e31f 100644
9176--- a/arch/powerpc/include/asm/thread_info.h
9177+++ b/arch/powerpc/include/asm/thread_info.h
9178@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
9179 #if defined(CONFIG_PPC64)
9180 #define TIF_ELF2ABI 18 /* function descriptors must die! */
9181 #endif
9182+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
9183+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9184
9185 /* as above, but as bit values */
9186 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
9187@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
9188 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9189 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
9190 #define _TIF_NOHZ (1<<TIF_NOHZ)
9191+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9192 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
9193 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
9194- _TIF_NOHZ)
9195+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
9196
9197 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
9198 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
9199diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
9200index 9485b43..3bd3c16 100644
9201--- a/arch/powerpc/include/asm/uaccess.h
9202+++ b/arch/powerpc/include/asm/uaccess.h
9203@@ -58,6 +58,7 @@
9204
9205 #endif
9206
9207+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9208 #define access_ok(type, addr, size) \
9209 (__chk_user_ptr(addr), \
9210 __access_ok((__force unsigned long)(addr), (size), get_fs()))
9211@@ -318,52 +319,6 @@ do { \
9212 extern unsigned long __copy_tofrom_user(void __user *to,
9213 const void __user *from, unsigned long size);
9214
9215-#ifndef __powerpc64__
9216-
9217-static inline unsigned long copy_from_user(void *to,
9218- const void __user *from, unsigned long n)
9219-{
9220- unsigned long over;
9221-
9222- if (access_ok(VERIFY_READ, from, n))
9223- return __copy_tofrom_user((__force void __user *)to, from, n);
9224- if ((unsigned long)from < TASK_SIZE) {
9225- over = (unsigned long)from + n - TASK_SIZE;
9226- return __copy_tofrom_user((__force void __user *)to, from,
9227- n - over) + over;
9228- }
9229- return n;
9230-}
9231-
9232-static inline unsigned long copy_to_user(void __user *to,
9233- const void *from, unsigned long n)
9234-{
9235- unsigned long over;
9236-
9237- if (access_ok(VERIFY_WRITE, to, n))
9238- return __copy_tofrom_user(to, (__force void __user *)from, n);
9239- if ((unsigned long)to < TASK_SIZE) {
9240- over = (unsigned long)to + n - TASK_SIZE;
9241- return __copy_tofrom_user(to, (__force void __user *)from,
9242- n - over) + over;
9243- }
9244- return n;
9245-}
9246-
9247-#else /* __powerpc64__ */
9248-
9249-#define __copy_in_user(to, from, size) \
9250- __copy_tofrom_user((to), (from), (size))
9251-
9252-extern unsigned long copy_from_user(void *to, const void __user *from,
9253- unsigned long n);
9254-extern unsigned long copy_to_user(void __user *to, const void *from,
9255- unsigned long n);
9256-extern unsigned long copy_in_user(void __user *to, const void __user *from,
9257- unsigned long n);
9258-
9259-#endif /* __powerpc64__ */
9260-
9261 static inline unsigned long __copy_from_user_inatomic(void *to,
9262 const void __user *from, unsigned long n)
9263 {
9264@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
9265 if (ret == 0)
9266 return 0;
9267 }
9268+
9269+ if (!__builtin_constant_p(n))
9270+ check_object_size(to, n, false);
9271+
9272 return __copy_tofrom_user((__force void __user *)to, from, n);
9273 }
9274
9275@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
9276 if (ret == 0)
9277 return 0;
9278 }
9279+
9280+ if (!__builtin_constant_p(n))
9281+ check_object_size(from, n, true);
9282+
9283 return __copy_tofrom_user(to, (__force const void __user *)from, n);
9284 }
9285
9286@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
9287 return __copy_to_user_inatomic(to, from, size);
9288 }
9289
9290+#ifndef __powerpc64__
9291+
9292+static inline unsigned long __must_check copy_from_user(void *to,
9293+ const void __user *from, unsigned long n)
9294+{
9295+ unsigned long over;
9296+
9297+ if ((long)n < 0)
9298+ return n;
9299+
9300+ if (access_ok(VERIFY_READ, from, n)) {
9301+ if (!__builtin_constant_p(n))
9302+ check_object_size(to, n, false);
9303+ return __copy_tofrom_user((__force void __user *)to, from, n);
9304+ }
9305+ if ((unsigned long)from < TASK_SIZE) {
9306+ over = (unsigned long)from + n - TASK_SIZE;
9307+ if (!__builtin_constant_p(n - over))
9308+ check_object_size(to, n - over, false);
9309+ return __copy_tofrom_user((__force void __user *)to, from,
9310+ n - over) + over;
9311+ }
9312+ return n;
9313+}
9314+
9315+static inline unsigned long __must_check copy_to_user(void __user *to,
9316+ const void *from, unsigned long n)
9317+{
9318+ unsigned long over;
9319+
9320+ if ((long)n < 0)
9321+ return n;
9322+
9323+ if (access_ok(VERIFY_WRITE, to, n)) {
9324+ if (!__builtin_constant_p(n))
9325+ check_object_size(from, n, true);
9326+ return __copy_tofrom_user(to, (__force void __user *)from, n);
9327+ }
9328+ if ((unsigned long)to < TASK_SIZE) {
9329+ over = (unsigned long)to + n - TASK_SIZE;
9330+ if (!__builtin_constant_p(n))
9331+ check_object_size(from, n - over, true);
9332+ return __copy_tofrom_user(to, (__force void __user *)from,
9333+ n - over) + over;
9334+ }
9335+ return n;
9336+}
9337+
9338+#else /* __powerpc64__ */
9339+
9340+#define __copy_in_user(to, from, size) \
9341+ __copy_tofrom_user((to), (from), (size))
9342+
9343+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
9344+{
9345+ if ((long)n < 0 || n > INT_MAX)
9346+ return n;
9347+
9348+ if (!__builtin_constant_p(n))
9349+ check_object_size(to, n, false);
9350+
9351+ if (likely(access_ok(VERIFY_READ, from, n)))
9352+ n = __copy_from_user(to, from, n);
9353+ else
9354+ memset(to, 0, n);
9355+ return n;
9356+}
9357+
9358+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
9359+{
9360+ if ((long)n < 0 || n > INT_MAX)
9361+ return n;
9362+
9363+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
9364+ if (!__builtin_constant_p(n))
9365+ check_object_size(from, n, true);
9366+ n = __copy_to_user(to, from, n);
9367+ }
9368+ return n;
9369+}
9370+
9371+extern unsigned long copy_in_user(void __user *to, const void __user *from,
9372+ unsigned long n);
9373+
9374+#endif /* __powerpc64__ */
9375+
9376 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9377
9378 static inline unsigned long clear_user(void __user *addr, unsigned long size)
9379diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9380index 670c312..60c2b52 100644
9381--- a/arch/powerpc/kernel/Makefile
9382+++ b/arch/powerpc/kernel/Makefile
9383@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9384 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9385 endif
9386
9387+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9388+
9389 obj-y := cputable.o ptrace.o syscalls.o \
9390 irq.o align.o signal_32.o pmc.o vdso.o \
9391 process.o systbl.o idle.o \
9392diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9393index bb9cac6..5181202 100644
9394--- a/arch/powerpc/kernel/exceptions-64e.S
9395+++ b/arch/powerpc/kernel/exceptions-64e.S
9396@@ -1010,6 +1010,7 @@ storage_fault_common:
9397 std r14,_DAR(r1)
9398 std r15,_DSISR(r1)
9399 addi r3,r1,STACK_FRAME_OVERHEAD
9400+ bl save_nvgprs
9401 mr r4,r14
9402 mr r5,r15
9403 ld r14,PACA_EXGEN+EX_R14(r13)
9404@@ -1018,8 +1019,7 @@ storage_fault_common:
9405 cmpdi r3,0
9406 bne- 1f
9407 b ret_from_except_lite
9408-1: bl save_nvgprs
9409- mr r5,r3
9410+1: mr r5,r3
9411 addi r3,r1,STACK_FRAME_OVERHEAD
9412 ld r4,_DAR(r1)
9413 bl bad_page_fault
9414diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9415index 050f79a..f385bfe 100644
9416--- a/arch/powerpc/kernel/exceptions-64s.S
9417+++ b/arch/powerpc/kernel/exceptions-64s.S
9418@@ -1593,10 +1593,10 @@ handle_page_fault:
9419 11: ld r4,_DAR(r1)
9420 ld r5,_DSISR(r1)
9421 addi r3,r1,STACK_FRAME_OVERHEAD
9422+ bl save_nvgprs
9423 bl do_page_fault
9424 cmpdi r3,0
9425 beq+ 12f
9426- bl save_nvgprs
9427 mr r5,r3
9428 addi r3,r1,STACK_FRAME_OVERHEAD
9429 lwz r4,_DAR(r1)
9430diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9431index 4c5891d..a5d88bb 100644
9432--- a/arch/powerpc/kernel/irq.c
9433+++ b/arch/powerpc/kernel/irq.c
9434@@ -461,6 +461,8 @@ void migrate_irqs(void)
9435 }
9436 #endif
9437
9438+extern void gr_handle_kernel_exploit(void);
9439+
9440 static inline void check_stack_overflow(void)
9441 {
9442 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9443@@ -473,6 +475,7 @@ static inline void check_stack_overflow(void)
9444 printk("do_IRQ: stack overflow: %ld\n",
9445 sp - sizeof(struct thread_info));
9446 dump_stack();
9447+ gr_handle_kernel_exploit();
9448 }
9449 #endif
9450 }
9451diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9452index 6cff040..74ac5d1b 100644
9453--- a/arch/powerpc/kernel/module_32.c
9454+++ b/arch/powerpc/kernel/module_32.c
9455@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9456 me->arch.core_plt_section = i;
9457 }
9458 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9459- printk("Module doesn't contain .plt or .init.plt sections.\n");
9460+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9461 return -ENOEXEC;
9462 }
9463
9464@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9465
9466 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9467 /* Init, or core PLT? */
9468- if (location >= mod->module_core
9469- && location < mod->module_core + mod->core_size)
9470+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9471+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9472 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9473- else
9474+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9475+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9476 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9477+ else {
9478+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9479+ return ~0UL;
9480+ }
9481
9482 /* Find this entry, or if that fails, the next avail. entry */
9483 while (entry->jump[0]) {
9484@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9485 }
9486 #ifdef CONFIG_DYNAMIC_FTRACE
9487 module->arch.tramp =
9488- do_plt_call(module->module_core,
9489+ do_plt_call(module->module_core_rx,
9490 (unsigned long)ftrace_caller,
9491 sechdrs, module);
9492 #endif
9493diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9494index bf44ae9..6d2ce71 100644
9495--- a/arch/powerpc/kernel/process.c
9496+++ b/arch/powerpc/kernel/process.c
9497@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9498 * Lookup NIP late so we have the best change of getting the
9499 * above info out without failing
9500 */
9501- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9502- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9503+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9504+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9505 #endif
9506 show_stack(current, (unsigned long *) regs->gpr[1]);
9507 if (!user_mode(regs))
9508@@ -1558,10 +1558,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9509 newsp = stack[0];
9510 ip = stack[STACK_FRAME_LR_SAVE];
9511 if (!firstframe || ip != lr) {
9512- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9513+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9514 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9515 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9516- printk(" (%pS)",
9517+ printk(" (%pA)",
9518 (void *)current->ret_stack[curr_frame].ret);
9519 curr_frame--;
9520 }
9521@@ -1581,7 +1581,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9522 struct pt_regs *regs = (struct pt_regs *)
9523 (sp + STACK_FRAME_OVERHEAD);
9524 lr = regs->link;
9525- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9526+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9527 regs->trap, (void *)regs->nip, (void *)lr);
9528 firstframe = 1;
9529 }
9530@@ -1617,58 +1617,3 @@ void notrace __ppc64_runlatch_off(void)
9531 mtspr(SPRN_CTRLT, ctrl);
9532 }
9533 #endif /* CONFIG_PPC64 */
9534-
9535-unsigned long arch_align_stack(unsigned long sp)
9536-{
9537- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9538- sp -= get_random_int() & ~PAGE_MASK;
9539- return sp & ~0xf;
9540-}
9541-
9542-static inline unsigned long brk_rnd(void)
9543-{
9544- unsigned long rnd = 0;
9545-
9546- /* 8MB for 32bit, 1GB for 64bit */
9547- if (is_32bit_task())
9548- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9549- else
9550- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9551-
9552- return rnd << PAGE_SHIFT;
9553-}
9554-
9555-unsigned long arch_randomize_brk(struct mm_struct *mm)
9556-{
9557- unsigned long base = mm->brk;
9558- unsigned long ret;
9559-
9560-#ifdef CONFIG_PPC_STD_MMU_64
9561- /*
9562- * If we are using 1TB segments and we are allowed to randomise
9563- * the heap, we can put it above 1TB so it is backed by a 1TB
9564- * segment. Otherwise the heap will be in the bottom 1TB
9565- * which always uses 256MB segments and this may result in a
9566- * performance penalty.
9567- */
9568- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9569- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9570-#endif
9571-
9572- ret = PAGE_ALIGN(base + brk_rnd());
9573-
9574- if (ret < mm->brk)
9575- return mm->brk;
9576-
9577- return ret;
9578-}
9579-
9580-unsigned long randomize_et_dyn(unsigned long base)
9581-{
9582- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9583-
9584- if (ret < base)
9585- return base;
9586-
9587- return ret;
9588-}
9589diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9590index 2e3d2bf..35df241 100644
9591--- a/arch/powerpc/kernel/ptrace.c
9592+++ b/arch/powerpc/kernel/ptrace.c
9593@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9594 return ret;
9595 }
9596
9597+#ifdef CONFIG_GRKERNSEC_SETXID
9598+extern void gr_delayed_cred_worker(void);
9599+#endif
9600+
9601 /*
9602 * We must return the syscall number to actually look up in the table.
9603 * This can be -1L to skip running any syscall at all.
9604@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9605
9606 secure_computing_strict(regs->gpr[0]);
9607
9608+#ifdef CONFIG_GRKERNSEC_SETXID
9609+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9610+ gr_delayed_cred_worker();
9611+#endif
9612+
9613 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9614 tracehook_report_syscall_entry(regs))
9615 /*
9616@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9617 {
9618 int step;
9619
9620+#ifdef CONFIG_GRKERNSEC_SETXID
9621+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9622+ gr_delayed_cred_worker();
9623+#endif
9624+
9625 audit_syscall_exit(regs);
9626
9627 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9628diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9629index b171001..4ac7ac5 100644
9630--- a/arch/powerpc/kernel/signal_32.c
9631+++ b/arch/powerpc/kernel/signal_32.c
9632@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9633 /* Save user registers on the stack */
9634 frame = &rt_sf->uc.uc_mcontext;
9635 addr = frame;
9636- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9637+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9638 sigret = 0;
9639 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9640 } else {
9641diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9642index 2cb0c94..c0c0bc9 100644
9643--- a/arch/powerpc/kernel/signal_64.c
9644+++ b/arch/powerpc/kernel/signal_64.c
9645@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9646 current->thread.fp_state.fpscr = 0;
9647
9648 /* Set up to return from userspace. */
9649- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9650+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9651 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9652 } else {
9653 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9654diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9655index 0dc43f9..a885d33 100644
9656--- a/arch/powerpc/kernel/traps.c
9657+++ b/arch/powerpc/kernel/traps.c
9658@@ -36,6 +36,7 @@
9659 #include <linux/debugfs.h>
9660 #include <linux/ratelimit.h>
9661 #include <linux/context_tracking.h>
9662+#include <linux/uaccess.h>
9663
9664 #include <asm/emulated_ops.h>
9665 #include <asm/pgtable.h>
9666@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9667 return flags;
9668 }
9669
9670+extern void gr_handle_kernel_exploit(void);
9671+
9672 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9673 int signr)
9674 {
9675@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9676 panic("Fatal exception in interrupt");
9677 if (panic_on_oops)
9678 panic("Fatal exception");
9679+
9680+ gr_handle_kernel_exploit();
9681+
9682 do_exit(signr);
9683 }
9684
9685@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9686 enum ctx_state prev_state = exception_enter();
9687 unsigned int reason = get_reason(regs);
9688
9689+#ifdef CONFIG_PAX_REFCOUNT
9690+ unsigned int bkpt;
9691+ const struct exception_table_entry *entry;
9692+
9693+ if (reason & REASON_ILLEGAL) {
9694+ /* Check if PaX bad instruction */
9695+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9696+ current->thread.trap_nr = 0;
9697+ pax_report_refcount_overflow(regs);
9698+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9699+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9700+ regs->nip = entry->fixup;
9701+ return;
9702+ }
9703+ /* fixup_exception() could not handle */
9704+ goto bail;
9705+ }
9706+ }
9707+#endif
9708+
9709 /* We can now get here via a FP Unavailable exception if the core
9710 * has no FPU, in that case the reason flags will be 0 */
9711
9712diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9713index f174351..5722009 100644
9714--- a/arch/powerpc/kernel/vdso.c
9715+++ b/arch/powerpc/kernel/vdso.c
9716@@ -35,6 +35,7 @@
9717 #include <asm/vdso.h>
9718 #include <asm/vdso_datapage.h>
9719 #include <asm/setup.h>
9720+#include <asm/mman.h>
9721
9722 #undef DEBUG
9723
9724@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9725 vdso_base = VDSO32_MBASE;
9726 #endif
9727
9728- current->mm->context.vdso_base = 0;
9729+ current->mm->context.vdso_base = ~0UL;
9730
9731 /* vDSO has a problem and was disabled, just don't "enable" it for the
9732 * process
9733@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9734 vdso_base = get_unmapped_area(NULL, vdso_base,
9735 (vdso_pages << PAGE_SHIFT) +
9736 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9737- 0, 0);
9738+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9739 if (IS_ERR_VALUE(vdso_base)) {
9740 rc = vdso_base;
9741 goto fail_mmapsem;
9742diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9743index 4c79284..0e462c3 100644
9744--- a/arch/powerpc/kvm/powerpc.c
9745+++ b/arch/powerpc/kvm/powerpc.c
9746@@ -1338,7 +1338,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9747 }
9748 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9749
9750-int kvm_arch_init(void *opaque)
9751+int kvm_arch_init(const void *opaque)
9752 {
9753 return 0;
9754 }
9755diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9756index 5eea6f3..5d10396 100644
9757--- a/arch/powerpc/lib/usercopy_64.c
9758+++ b/arch/powerpc/lib/usercopy_64.c
9759@@ -9,22 +9,6 @@
9760 #include <linux/module.h>
9761 #include <asm/uaccess.h>
9762
9763-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9764-{
9765- if (likely(access_ok(VERIFY_READ, from, n)))
9766- n = __copy_from_user(to, from, n);
9767- else
9768- memset(to, 0, n);
9769- return n;
9770-}
9771-
9772-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9773-{
9774- if (likely(access_ok(VERIFY_WRITE, to, n)))
9775- n = __copy_to_user(to, from, n);
9776- return n;
9777-}
9778-
9779 unsigned long copy_in_user(void __user *to, const void __user *from,
9780 unsigned long n)
9781 {
9782@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9783 return n;
9784 }
9785
9786-EXPORT_SYMBOL(copy_from_user);
9787-EXPORT_SYMBOL(copy_to_user);
9788 EXPORT_SYMBOL(copy_in_user);
9789
9790diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9791index 51ab9e7..7d3c78b 100644
9792--- a/arch/powerpc/mm/fault.c
9793+++ b/arch/powerpc/mm/fault.c
9794@@ -33,6 +33,10 @@
9795 #include <linux/magic.h>
9796 #include <linux/ratelimit.h>
9797 #include <linux/context_tracking.h>
9798+#include <linux/slab.h>
9799+#include <linux/pagemap.h>
9800+#include <linux/compiler.h>
9801+#include <linux/unistd.h>
9802
9803 #include <asm/firmware.h>
9804 #include <asm/page.h>
9805@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9806 }
9807 #endif
9808
9809+#ifdef CONFIG_PAX_PAGEEXEC
9810+/*
9811+ * PaX: decide what to do with offenders (regs->nip = fault address)
9812+ *
9813+ * returns 1 when task should be killed
9814+ */
9815+static int pax_handle_fetch_fault(struct pt_regs *regs)
9816+{
9817+ return 1;
9818+}
9819+
9820+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9821+{
9822+ unsigned long i;
9823+
9824+ printk(KERN_ERR "PAX: bytes at PC: ");
9825+ for (i = 0; i < 5; i++) {
9826+ unsigned int c;
9827+ if (get_user(c, (unsigned int __user *)pc+i))
9828+ printk(KERN_CONT "???????? ");
9829+ else
9830+ printk(KERN_CONT "%08x ", c);
9831+ }
9832+ printk("\n");
9833+}
9834+#endif
9835+
9836 /*
9837 * Check whether the instruction at regs->nip is a store using
9838 * an update addressing form which will update r1.
9839@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9840 * indicate errors in DSISR but can validly be set in SRR1.
9841 */
9842 if (trap == 0x400)
9843- error_code &= 0x48200000;
9844+ error_code &= 0x58200000;
9845 else
9846 is_write = error_code & DSISR_ISSTORE;
9847 #else
9848@@ -378,7 +409,7 @@ good_area:
9849 * "undefined". Of those that can be set, this is the only
9850 * one which seems bad.
9851 */
9852- if (error_code & 0x10000000)
9853+ if (error_code & DSISR_GUARDED)
9854 /* Guarded storage error. */
9855 goto bad_area;
9856 #endif /* CONFIG_8xx */
9857@@ -393,7 +424,7 @@ good_area:
9858 * processors use the same I/D cache coherency mechanism
9859 * as embedded.
9860 */
9861- if (error_code & DSISR_PROTFAULT)
9862+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9863 goto bad_area;
9864 #endif /* CONFIG_PPC_STD_MMU */
9865
9866@@ -483,6 +514,23 @@ bad_area:
9867 bad_area_nosemaphore:
9868 /* User mode accesses cause a SIGSEGV */
9869 if (user_mode(regs)) {
9870+
9871+#ifdef CONFIG_PAX_PAGEEXEC
9872+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9873+#ifdef CONFIG_PPC_STD_MMU
9874+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9875+#else
9876+ if (is_exec && regs->nip == address) {
9877+#endif
9878+ switch (pax_handle_fetch_fault(regs)) {
9879+ }
9880+
9881+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9882+ do_group_exit(SIGKILL);
9883+ }
9884+ }
9885+#endif
9886+
9887 _exception(SIGSEGV, regs, code, address);
9888 goto bail;
9889 }
9890diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9891index cb8bdbe..cde4bc7 100644
9892--- a/arch/powerpc/mm/mmap.c
9893+++ b/arch/powerpc/mm/mmap.c
9894@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9895 return sysctl_legacy_va_layout;
9896 }
9897
9898-static unsigned long mmap_rnd(void)
9899+static unsigned long mmap_rnd(struct mm_struct *mm)
9900 {
9901 unsigned long rnd = 0;
9902
9903+#ifdef CONFIG_PAX_RANDMMAP
9904+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9905+#endif
9906+
9907 if (current->flags & PF_RANDOMIZE) {
9908 /* 8MB for 32bit, 1GB for 64bit */
9909 if (is_32bit_task())
9910@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9911 return rnd << PAGE_SHIFT;
9912 }
9913
9914-static inline unsigned long mmap_base(void)
9915+static inline unsigned long mmap_base(struct mm_struct *mm)
9916 {
9917 unsigned long gap = rlimit(RLIMIT_STACK);
9918
9919@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9920 else if (gap > MAX_GAP)
9921 gap = MAX_GAP;
9922
9923- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9924+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9925 }
9926
9927 /*
9928@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9929 */
9930 if (mmap_is_legacy()) {
9931 mm->mmap_base = TASK_UNMAPPED_BASE;
9932+
9933+#ifdef CONFIG_PAX_RANDMMAP
9934+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9935+ mm->mmap_base += mm->delta_mmap;
9936+#endif
9937+
9938 mm->get_unmapped_area = arch_get_unmapped_area;
9939 } else {
9940- mm->mmap_base = mmap_base();
9941+ mm->mmap_base = mmap_base(mm);
9942+
9943+#ifdef CONFIG_PAX_RANDMMAP
9944+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9945+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9946+#endif
9947+
9948 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9949 }
9950 }
9951diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9952index b0c75cc..ef7fb93 100644
9953--- a/arch/powerpc/mm/slice.c
9954+++ b/arch/powerpc/mm/slice.c
9955@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9956 if ((mm->task_size - len) < addr)
9957 return 0;
9958 vma = find_vma(mm, addr);
9959- return (!vma || (addr + len) <= vma->vm_start);
9960+ return check_heap_stack_gap(vma, addr, len, 0);
9961 }
9962
9963 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9964@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9965 info.align_offset = 0;
9966
9967 addr = TASK_UNMAPPED_BASE;
9968+
9969+#ifdef CONFIG_PAX_RANDMMAP
9970+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9971+ addr += mm->delta_mmap;
9972+#endif
9973+
9974 while (addr < TASK_SIZE) {
9975 info.low_limit = addr;
9976 if (!slice_scan_available(addr, available, 1, &addr))
9977@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9978 if (fixed && addr > (mm->task_size - len))
9979 return -ENOMEM;
9980
9981+#ifdef CONFIG_PAX_RANDMMAP
9982+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9983+ addr = 0;
9984+#endif
9985+
9986 /* If hint, make sure it matches our alignment restrictions */
9987 if (!fixed && addr) {
9988 addr = _ALIGN_UP(addr, 1ul << pshift);
9989diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
9990index 3afa6f4..40c53ff 100644
9991--- a/arch/powerpc/net/bpf_jit_comp.c
9992+++ b/arch/powerpc/net/bpf_jit_comp.c
9993@@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp)
9994 {
9995 if (fp->jited)
9996 module_free(NULL, fp->bpf_func);
9997- kfree(fp);
9998+
9999+ bpf_prog_unlock_free(fp);
10000 }
10001diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10002index 4278acf..67fd0e6 100644
10003--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10004+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10005@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
10006 }
10007
10008 static struct pci_ops scc_pciex_pci_ops = {
10009- scc_pciex_read_config,
10010- scc_pciex_write_config,
10011+ .read = scc_pciex_read_config,
10012+ .write = scc_pciex_write_config,
10013 };
10014
10015 static void pciex_clear_intr_all(unsigned int __iomem *base)
10016diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
10017index d966bbe..372124a 100644
10018--- a/arch/powerpc/platforms/cell/spufs/file.c
10019+++ b/arch/powerpc/platforms/cell/spufs/file.c
10020@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10021 return VM_FAULT_NOPAGE;
10022 }
10023
10024-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
10025+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
10026 unsigned long address,
10027- void *buf, int len, int write)
10028+ void *buf, size_t len, int write)
10029 {
10030 struct spu_context *ctx = vma->vm_file->private_data;
10031 unsigned long offset = address - vma->vm_start;
10032diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
10033index fa934fe..c296056 100644
10034--- a/arch/s390/include/asm/atomic.h
10035+++ b/arch/s390/include/asm/atomic.h
10036@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
10037 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
10038 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10039
10040+#define atomic64_read_unchecked(v) atomic64_read(v)
10041+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10042+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10043+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10044+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10045+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10046+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10047+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10048+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10049+
10050 #endif /* __ARCH_S390_ATOMIC__ */
10051diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
10052index 19ff956..8d39cb1 100644
10053--- a/arch/s390/include/asm/barrier.h
10054+++ b/arch/s390/include/asm/barrier.h
10055@@ -37,7 +37,7 @@
10056 do { \
10057 compiletime_assert_atomic_type(*p); \
10058 barrier(); \
10059- ACCESS_ONCE(*p) = (v); \
10060+ ACCESS_ONCE_RW(*p) = (v); \
10061 } while (0)
10062
10063 #define smp_load_acquire(p) \
10064diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
10065index 4d7ccac..d03d0ad 100644
10066--- a/arch/s390/include/asm/cache.h
10067+++ b/arch/s390/include/asm/cache.h
10068@@ -9,8 +9,10 @@
10069 #ifndef __ARCH_S390_CACHE_H
10070 #define __ARCH_S390_CACHE_H
10071
10072-#define L1_CACHE_BYTES 256
10073+#include <linux/const.h>
10074+
10075 #define L1_CACHE_SHIFT 8
10076+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10077 #define NET_SKB_PAD 32
10078
10079 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10080diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
10081index 78f4f87..598ce39 100644
10082--- a/arch/s390/include/asm/elf.h
10083+++ b/arch/s390/include/asm/elf.h
10084@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
10085 the loader. We need to make sure that it is out of the way of the program
10086 that it will "exec", and that there is sufficient room for the brk. */
10087
10088-extern unsigned long randomize_et_dyn(unsigned long base);
10089-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
10090+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
10091+
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10097+#endif
10098
10099 /* This yields a mask that user programs can use to figure out what
10100 instruction set this CPU supports. */
10101@@ -222,9 +228,6 @@ struct linux_binprm;
10102 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
10103 int arch_setup_additional_pages(struct linux_binprm *, int);
10104
10105-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10106-#define arch_randomize_brk arch_randomize_brk
10107-
10108 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
10109
10110 #endif
10111diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
10112index c4a93d6..4d2a9b4 100644
10113--- a/arch/s390/include/asm/exec.h
10114+++ b/arch/s390/include/asm/exec.h
10115@@ -7,6 +7,6 @@
10116 #ifndef __ASM_EXEC_H
10117 #define __ASM_EXEC_H
10118
10119-extern unsigned long arch_align_stack(unsigned long sp);
10120+#define arch_align_stack(x) ((x) & ~0xfUL)
10121
10122 #endif /* __ASM_EXEC_H */
10123diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
10124index cd4c68e..6764641 100644
10125--- a/arch/s390/include/asm/uaccess.h
10126+++ b/arch/s390/include/asm/uaccess.h
10127@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
10128 __range_ok((unsigned long)(addr), (size)); \
10129 })
10130
10131+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10132 #define access_ok(type, addr, size) __access_ok(addr, size)
10133
10134 /*
10135@@ -275,6 +276,10 @@ static inline unsigned long __must_check
10136 copy_to_user(void __user *to, const void *from, unsigned long n)
10137 {
10138 might_fault();
10139+
10140+ if ((long)n < 0)
10141+ return n;
10142+
10143 return __copy_to_user(to, from, n);
10144 }
10145
10146@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
10147 static inline unsigned long __must_check
10148 copy_from_user(void *to, const void __user *from, unsigned long n)
10149 {
10150- unsigned int sz = __compiletime_object_size(to);
10151+ size_t sz = __compiletime_object_size(to);
10152
10153 might_fault();
10154- if (unlikely(sz != -1 && sz < n)) {
10155+
10156+ if ((long)n < 0)
10157+ return n;
10158+
10159+ if (unlikely(sz != (size_t)-1 && sz < n)) {
10160 copy_from_user_overflow();
10161 return n;
10162 }
10163diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
10164index b89b591..fd9609d 100644
10165--- a/arch/s390/kernel/module.c
10166+++ b/arch/s390/kernel/module.c
10167@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
10168
10169 /* Increase core size by size of got & plt and set start
10170 offsets for got and plt. */
10171- me->core_size = ALIGN(me->core_size, 4);
10172- me->arch.got_offset = me->core_size;
10173- me->core_size += me->arch.got_size;
10174- me->arch.plt_offset = me->core_size;
10175- me->core_size += me->arch.plt_size;
10176+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
10177+ me->arch.got_offset = me->core_size_rw;
10178+ me->core_size_rw += me->arch.got_size;
10179+ me->arch.plt_offset = me->core_size_rx;
10180+ me->core_size_rx += me->arch.plt_size;
10181 return 0;
10182 }
10183
10184@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10185 if (info->got_initialized == 0) {
10186 Elf_Addr *gotent;
10187
10188- gotent = me->module_core + me->arch.got_offset +
10189+ gotent = me->module_core_rw + me->arch.got_offset +
10190 info->got_offset;
10191 *gotent = val;
10192 info->got_initialized = 1;
10193@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10194 rc = apply_rela_bits(loc, val, 0, 64, 0);
10195 else if (r_type == R_390_GOTENT ||
10196 r_type == R_390_GOTPLTENT) {
10197- val += (Elf_Addr) me->module_core - loc;
10198+ val += (Elf_Addr) me->module_core_rw - loc;
10199 rc = apply_rela_bits(loc, val, 1, 32, 1);
10200 }
10201 break;
10202@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10203 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
10204 if (info->plt_initialized == 0) {
10205 unsigned int *ip;
10206- ip = me->module_core + me->arch.plt_offset +
10207+ ip = me->module_core_rx + me->arch.plt_offset +
10208 info->plt_offset;
10209 #ifndef CONFIG_64BIT
10210 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
10211@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10212 val - loc + 0xffffUL < 0x1ffffeUL) ||
10213 (r_type == R_390_PLT32DBL &&
10214 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
10215- val = (Elf_Addr) me->module_core +
10216+ val = (Elf_Addr) me->module_core_rx +
10217 me->arch.plt_offset +
10218 info->plt_offset;
10219 val += rela->r_addend - loc;
10220@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10221 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
10222 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
10223 val = val + rela->r_addend -
10224- ((Elf_Addr) me->module_core + me->arch.got_offset);
10225+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
10226 if (r_type == R_390_GOTOFF16)
10227 rc = apply_rela_bits(loc, val, 0, 16, 0);
10228 else if (r_type == R_390_GOTOFF32)
10229@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10230 break;
10231 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
10232 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
10233- val = (Elf_Addr) me->module_core + me->arch.got_offset +
10234+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
10235 rela->r_addend - loc;
10236 if (r_type == R_390_GOTPC)
10237 rc = apply_rela_bits(loc, val, 1, 32, 0);
10238diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
10239index 93b9ca4..4ea1454 100644
10240--- a/arch/s390/kernel/process.c
10241+++ b/arch/s390/kernel/process.c
10242@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
10243 }
10244 return 0;
10245 }
10246-
10247-unsigned long arch_align_stack(unsigned long sp)
10248-{
10249- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10250- sp -= get_random_int() & ~PAGE_MASK;
10251- return sp & ~0xf;
10252-}
10253-
10254-static inline unsigned long brk_rnd(void)
10255-{
10256- /* 8MB for 32bit, 1GB for 64bit */
10257- if (is_32bit_task())
10258- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
10259- else
10260- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
10261-}
10262-
10263-unsigned long arch_randomize_brk(struct mm_struct *mm)
10264-{
10265- unsigned long ret;
10266-
10267- ret = PAGE_ALIGN(mm->brk + brk_rnd());
10268- return (ret > mm->brk) ? ret : mm->brk;
10269-}
10270-
10271-unsigned long randomize_et_dyn(unsigned long base)
10272-{
10273- unsigned long ret;
10274-
10275- if (!(current->flags & PF_RANDOMIZE))
10276- return base;
10277- ret = PAGE_ALIGN(base + brk_rnd());
10278- return (ret > base) ? ret : base;
10279-}
10280diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
10281index 9b436c2..54fbf0a 100644
10282--- a/arch/s390/mm/mmap.c
10283+++ b/arch/s390/mm/mmap.c
10284@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10285 */
10286 if (mmap_is_legacy()) {
10287 mm->mmap_base = mmap_base_legacy();
10288+
10289+#ifdef CONFIG_PAX_RANDMMAP
10290+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10291+ mm->mmap_base += mm->delta_mmap;
10292+#endif
10293+
10294 mm->get_unmapped_area = arch_get_unmapped_area;
10295 } else {
10296 mm->mmap_base = mmap_base();
10297+
10298+#ifdef CONFIG_PAX_RANDMMAP
10299+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10300+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10301+#endif
10302+
10303 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10304 }
10305 }
10306@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10307 */
10308 if (mmap_is_legacy()) {
10309 mm->mmap_base = mmap_base_legacy();
10310+
10311+#ifdef CONFIG_PAX_RANDMMAP
10312+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10313+ mm->mmap_base += mm->delta_mmap;
10314+#endif
10315+
10316 mm->get_unmapped_area = s390_get_unmapped_area;
10317 } else {
10318 mm->mmap_base = mmap_base();
10319+
10320+#ifdef CONFIG_PAX_RANDMMAP
10321+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10322+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10323+#endif
10324+
10325 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
10326 }
10327 }
10328diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
10329index 61e45b7..f2833c5 100644
10330--- a/arch/s390/net/bpf_jit_comp.c
10331+++ b/arch/s390/net/bpf_jit_comp.c
10332@@ -887,5 +887,5 @@ void bpf_jit_free(struct bpf_prog *fp)
10333 module_free(NULL, header);
10334
10335 free_filter:
10336- kfree(fp);
10337+ bpf_prog_unlock_free(fp);
10338 }
10339diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
10340index ae3d59f..f65f075 100644
10341--- a/arch/score/include/asm/cache.h
10342+++ b/arch/score/include/asm/cache.h
10343@@ -1,7 +1,9 @@
10344 #ifndef _ASM_SCORE_CACHE_H
10345 #define _ASM_SCORE_CACHE_H
10346
10347+#include <linux/const.h>
10348+
10349 #define L1_CACHE_SHIFT 4
10350-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10351+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10352
10353 #endif /* _ASM_SCORE_CACHE_H */
10354diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
10355index f9f3cd5..58ff438 100644
10356--- a/arch/score/include/asm/exec.h
10357+++ b/arch/score/include/asm/exec.h
10358@@ -1,6 +1,6 @@
10359 #ifndef _ASM_SCORE_EXEC_H
10360 #define _ASM_SCORE_EXEC_H
10361
10362-extern unsigned long arch_align_stack(unsigned long sp);
10363+#define arch_align_stack(x) (x)
10364
10365 #endif /* _ASM_SCORE_EXEC_H */
10366diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
10367index a1519ad3..e8ac1ff 100644
10368--- a/arch/score/kernel/process.c
10369+++ b/arch/score/kernel/process.c
10370@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
10371
10372 return task_pt_regs(task)->cp0_epc;
10373 }
10374-
10375-unsigned long arch_align_stack(unsigned long sp)
10376-{
10377- return sp;
10378-}
10379diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
10380index ef9e555..331bd29 100644
10381--- a/arch/sh/include/asm/cache.h
10382+++ b/arch/sh/include/asm/cache.h
10383@@ -9,10 +9,11 @@
10384 #define __ASM_SH_CACHE_H
10385 #ifdef __KERNEL__
10386
10387+#include <linux/const.h>
10388 #include <linux/init.h>
10389 #include <cpu/cache.h>
10390
10391-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10392+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10393
10394 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10395
10396diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
10397index 6777177..cb5e44f 100644
10398--- a/arch/sh/mm/mmap.c
10399+++ b/arch/sh/mm/mmap.c
10400@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10401 struct mm_struct *mm = current->mm;
10402 struct vm_area_struct *vma;
10403 int do_colour_align;
10404+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10405 struct vm_unmapped_area_info info;
10406
10407 if (flags & MAP_FIXED) {
10408@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10409 if (filp || (flags & MAP_SHARED))
10410 do_colour_align = 1;
10411
10412+#ifdef CONFIG_PAX_RANDMMAP
10413+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10414+#endif
10415+
10416 if (addr) {
10417 if (do_colour_align)
10418 addr = COLOUR_ALIGN(addr, pgoff);
10419@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10420 addr = PAGE_ALIGN(addr);
10421
10422 vma = find_vma(mm, addr);
10423- if (TASK_SIZE - len >= addr &&
10424- (!vma || addr + len <= vma->vm_start))
10425+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10426 return addr;
10427 }
10428
10429 info.flags = 0;
10430 info.length = len;
10431- info.low_limit = TASK_UNMAPPED_BASE;
10432+ info.low_limit = mm->mmap_base;
10433 info.high_limit = TASK_SIZE;
10434 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10435 info.align_offset = pgoff << PAGE_SHIFT;
10436@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10437 struct mm_struct *mm = current->mm;
10438 unsigned long addr = addr0;
10439 int do_colour_align;
10440+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10441 struct vm_unmapped_area_info info;
10442
10443 if (flags & MAP_FIXED) {
10444@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10445 if (filp || (flags & MAP_SHARED))
10446 do_colour_align = 1;
10447
10448+#ifdef CONFIG_PAX_RANDMMAP
10449+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10450+#endif
10451+
10452 /* requesting a specific address */
10453 if (addr) {
10454 if (do_colour_align)
10455@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10456 addr = PAGE_ALIGN(addr);
10457
10458 vma = find_vma(mm, addr);
10459- if (TASK_SIZE - len >= addr &&
10460- (!vma || addr + len <= vma->vm_start))
10461+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10462 return addr;
10463 }
10464
10465@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10466 VM_BUG_ON(addr != -ENOMEM);
10467 info.flags = 0;
10468 info.low_limit = TASK_UNMAPPED_BASE;
10469+
10470+#ifdef CONFIG_PAX_RANDMMAP
10471+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10472+ info.low_limit += mm->delta_mmap;
10473+#endif
10474+
10475 info.high_limit = TASK_SIZE;
10476 addr = vm_unmapped_area(&info);
10477 }
10478diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10479index bb894c8..8141d5c 100644
10480--- a/arch/sparc/include/asm/atomic_64.h
10481+++ b/arch/sparc/include/asm/atomic_64.h
10482@@ -15,18 +15,40 @@
10483 #define ATOMIC64_INIT(i) { (i) }
10484
10485 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10486+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10487+{
10488+ return v->counter;
10489+}
10490 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10491+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10492+{
10493+ return v->counter;
10494+}
10495
10496 #define atomic_set(v, i) (((v)->counter) = i)
10497+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10498+{
10499+ v->counter = i;
10500+}
10501 #define atomic64_set(v, i) (((v)->counter) = i)
10502+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10503+{
10504+ v->counter = i;
10505+}
10506
10507 void atomic_add(int, atomic_t *);
10508+void atomic_add_unchecked(int, atomic_unchecked_t *);
10509 void atomic64_add(long, atomic64_t *);
10510+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10511 void atomic_sub(int, atomic_t *);
10512+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10513 void atomic64_sub(long, atomic64_t *);
10514+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10515
10516 int atomic_add_ret(int, atomic_t *);
10517+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10518 long atomic64_add_ret(long, atomic64_t *);
10519+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10520 int atomic_sub_ret(int, atomic_t *);
10521 long atomic64_sub_ret(long, atomic64_t *);
10522
10523@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10524 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10525
10526 #define atomic_inc_return(v) atomic_add_ret(1, v)
10527+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10528+{
10529+ return atomic_add_ret_unchecked(1, v);
10530+}
10531 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10532+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10533+{
10534+ return atomic64_add_ret_unchecked(1, v);
10535+}
10536
10537 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10538 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10539
10540 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10541+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10542+{
10543+ return atomic_add_ret_unchecked(i, v);
10544+}
10545 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10546+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10547+{
10548+ return atomic64_add_ret_unchecked(i, v);
10549+}
10550
10551 /*
10552 * atomic_inc_and_test - increment and test
10553@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10554 * other cases.
10555 */
10556 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10557+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10558+{
10559+ return atomic_inc_return_unchecked(v) == 0;
10560+}
10561 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10562
10563 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10564@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10565 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10566
10567 #define atomic_inc(v) atomic_add(1, v)
10568+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10569+{
10570+ atomic_add_unchecked(1, v);
10571+}
10572 #define atomic64_inc(v) atomic64_add(1, v)
10573+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10574+{
10575+ atomic64_add_unchecked(1, v);
10576+}
10577
10578 #define atomic_dec(v) atomic_sub(1, v)
10579+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10580+{
10581+ atomic_sub_unchecked(1, v);
10582+}
10583 #define atomic64_dec(v) atomic64_sub(1, v)
10584+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10585+{
10586+ atomic64_sub_unchecked(1, v);
10587+}
10588
10589 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10590 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10591
10592 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10593+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10594+{
10595+ return cmpxchg(&v->counter, old, new);
10596+}
10597 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10598+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10599+{
10600+ return xchg(&v->counter, new);
10601+}
10602
10603 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10604 {
10605- int c, old;
10606+ int c, old, new;
10607 c = atomic_read(v);
10608 for (;;) {
10609- if (unlikely(c == (u)))
10610+ if (unlikely(c == u))
10611 break;
10612- old = atomic_cmpxchg((v), c, c + (a));
10613+
10614+ asm volatile("addcc %2, %0, %0\n"
10615+
10616+#ifdef CONFIG_PAX_REFCOUNT
10617+ "tvs %%icc, 6\n"
10618+#endif
10619+
10620+ : "=r" (new)
10621+ : "0" (c), "ir" (a)
10622+ : "cc");
10623+
10624+ old = atomic_cmpxchg(v, c, new);
10625 if (likely(old == c))
10626 break;
10627 c = old;
10628@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10629 #define atomic64_cmpxchg(v, o, n) \
10630 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10631 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10632+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10633+{
10634+ return xchg(&v->counter, new);
10635+}
10636
10637 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10638 {
10639- long c, old;
10640+ long c, old, new;
10641 c = atomic64_read(v);
10642 for (;;) {
10643- if (unlikely(c == (u)))
10644+ if (unlikely(c == u))
10645 break;
10646- old = atomic64_cmpxchg((v), c, c + (a));
10647+
10648+ asm volatile("addcc %2, %0, %0\n"
10649+
10650+#ifdef CONFIG_PAX_REFCOUNT
10651+ "tvs %%xcc, 6\n"
10652+#endif
10653+
10654+ : "=r" (new)
10655+ : "0" (c), "ir" (a)
10656+ : "cc");
10657+
10658+ old = atomic64_cmpxchg(v, c, new);
10659 if (likely(old == c))
10660 break;
10661 c = old;
10662 }
10663- return c != (u);
10664+ return c != u;
10665 }
10666
10667 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10668diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10669index 305dcc3..7835030 100644
10670--- a/arch/sparc/include/asm/barrier_64.h
10671+++ b/arch/sparc/include/asm/barrier_64.h
10672@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10673 do { \
10674 compiletime_assert_atomic_type(*p); \
10675 barrier(); \
10676- ACCESS_ONCE(*p) = (v); \
10677+ ACCESS_ONCE_RW(*p) = (v); \
10678 } while (0)
10679
10680 #define smp_load_acquire(p) \
10681diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10682index 5bb6991..5c2132e 100644
10683--- a/arch/sparc/include/asm/cache.h
10684+++ b/arch/sparc/include/asm/cache.h
10685@@ -7,10 +7,12 @@
10686 #ifndef _SPARC_CACHE_H
10687 #define _SPARC_CACHE_H
10688
10689+#include <linux/const.h>
10690+
10691 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10692
10693 #define L1_CACHE_SHIFT 5
10694-#define L1_CACHE_BYTES 32
10695+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10696
10697 #ifdef CONFIG_SPARC32
10698 #define SMP_CACHE_BYTES_SHIFT 5
10699diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10700index a24e41f..47677ff 100644
10701--- a/arch/sparc/include/asm/elf_32.h
10702+++ b/arch/sparc/include/asm/elf_32.h
10703@@ -114,6 +114,13 @@ typedef struct {
10704
10705 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10706
10707+#ifdef CONFIG_PAX_ASLR
10708+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10709+
10710+#define PAX_DELTA_MMAP_LEN 16
10711+#define PAX_DELTA_STACK_LEN 16
10712+#endif
10713+
10714 /* This yields a mask that user programs can use to figure out what
10715 instruction set this cpu supports. This can NOT be done in userspace
10716 on Sparc. */
10717diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10718index 370ca1e..d4f4a98 100644
10719--- a/arch/sparc/include/asm/elf_64.h
10720+++ b/arch/sparc/include/asm/elf_64.h
10721@@ -189,6 +189,13 @@ typedef struct {
10722 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10723 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10724
10725+#ifdef CONFIG_PAX_ASLR
10726+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10727+
10728+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10729+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10730+#endif
10731+
10732 extern unsigned long sparc64_elf_hwcap;
10733 #define ELF_HWCAP sparc64_elf_hwcap
10734
10735diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10736index a3890da..f6a408e 100644
10737--- a/arch/sparc/include/asm/pgalloc_32.h
10738+++ b/arch/sparc/include/asm/pgalloc_32.h
10739@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10740 }
10741
10742 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10743+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10744
10745 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10746 unsigned long address)
10747diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10748index 5e31871..b71c9d7 100644
10749--- a/arch/sparc/include/asm/pgalloc_64.h
10750+++ b/arch/sparc/include/asm/pgalloc_64.h
10751@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10752 }
10753
10754 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10755+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10756
10757 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10758 {
10759diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10760index 59ba6f6..4518128 100644
10761--- a/arch/sparc/include/asm/pgtable.h
10762+++ b/arch/sparc/include/asm/pgtable.h
10763@@ -5,4 +5,8 @@
10764 #else
10765 #include <asm/pgtable_32.h>
10766 #endif
10767+
10768+#define ktla_ktva(addr) (addr)
10769+#define ktva_ktla(addr) (addr)
10770+
10771 #endif
10772diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10773index b9b91ae..950b91e 100644
10774--- a/arch/sparc/include/asm/pgtable_32.h
10775+++ b/arch/sparc/include/asm/pgtable_32.h
10776@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10777 #define PAGE_SHARED SRMMU_PAGE_SHARED
10778 #define PAGE_COPY SRMMU_PAGE_COPY
10779 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10780+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10781+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10782+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10783 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10784
10785 /* Top-level page directory - dummy used by init-mm.
10786@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10787
10788 /* xwr */
10789 #define __P000 PAGE_NONE
10790-#define __P001 PAGE_READONLY
10791-#define __P010 PAGE_COPY
10792-#define __P011 PAGE_COPY
10793+#define __P001 PAGE_READONLY_NOEXEC
10794+#define __P010 PAGE_COPY_NOEXEC
10795+#define __P011 PAGE_COPY_NOEXEC
10796 #define __P100 PAGE_READONLY
10797 #define __P101 PAGE_READONLY
10798 #define __P110 PAGE_COPY
10799 #define __P111 PAGE_COPY
10800
10801 #define __S000 PAGE_NONE
10802-#define __S001 PAGE_READONLY
10803-#define __S010 PAGE_SHARED
10804-#define __S011 PAGE_SHARED
10805+#define __S001 PAGE_READONLY_NOEXEC
10806+#define __S010 PAGE_SHARED_NOEXEC
10807+#define __S011 PAGE_SHARED_NOEXEC
10808 #define __S100 PAGE_READONLY
10809 #define __S101 PAGE_READONLY
10810 #define __S110 PAGE_SHARED
10811diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10812index 79da178..c2eede8 100644
10813--- a/arch/sparc/include/asm/pgtsrmmu.h
10814+++ b/arch/sparc/include/asm/pgtsrmmu.h
10815@@ -115,6 +115,11 @@
10816 SRMMU_EXEC | SRMMU_REF)
10817 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10818 SRMMU_EXEC | SRMMU_REF)
10819+
10820+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10821+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10822+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10823+
10824 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10825 SRMMU_DIRTY | SRMMU_REF)
10826
10827diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10828index 29d64b1..4272fe8 100644
10829--- a/arch/sparc/include/asm/setup.h
10830+++ b/arch/sparc/include/asm/setup.h
10831@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10832 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10833
10834 /* init_64.c */
10835-extern atomic_t dcpage_flushes;
10836-extern atomic_t dcpage_flushes_xcall;
10837+extern atomic_unchecked_t dcpage_flushes;
10838+extern atomic_unchecked_t dcpage_flushes_xcall;
10839
10840 extern int sysctl_tsb_ratio;
10841 #endif
10842diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10843index 9689176..63c18ea 100644
10844--- a/arch/sparc/include/asm/spinlock_64.h
10845+++ b/arch/sparc/include/asm/spinlock_64.h
10846@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10847
10848 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10849
10850-static void inline arch_read_lock(arch_rwlock_t *lock)
10851+static inline void arch_read_lock(arch_rwlock_t *lock)
10852 {
10853 unsigned long tmp1, tmp2;
10854
10855 __asm__ __volatile__ (
10856 "1: ldsw [%2], %0\n"
10857 " brlz,pn %0, 2f\n"
10858-"4: add %0, 1, %1\n"
10859+"4: addcc %0, 1, %1\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
10862+" tvs %%icc, 6\n"
10863+#endif
10864+
10865 " cas [%2], %0, %1\n"
10866 " cmp %0, %1\n"
10867 " bne,pn %%icc, 1b\n"
10868@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10869 " .previous"
10870 : "=&r" (tmp1), "=&r" (tmp2)
10871 : "r" (lock)
10872- : "memory");
10873+ : "memory", "cc");
10874 }
10875
10876-static int inline arch_read_trylock(arch_rwlock_t *lock)
10877+static inline int arch_read_trylock(arch_rwlock_t *lock)
10878 {
10879 int tmp1, tmp2;
10880
10881@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10882 "1: ldsw [%2], %0\n"
10883 " brlz,a,pn %0, 2f\n"
10884 " mov 0, %0\n"
10885-" add %0, 1, %1\n"
10886+" addcc %0, 1, %1\n"
10887+
10888+#ifdef CONFIG_PAX_REFCOUNT
10889+" tvs %%icc, 6\n"
10890+#endif
10891+
10892 " cas [%2], %0, %1\n"
10893 " cmp %0, %1\n"
10894 " bne,pn %%icc, 1b\n"
10895@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10896 return tmp1;
10897 }
10898
10899-static void inline arch_read_unlock(arch_rwlock_t *lock)
10900+static inline void arch_read_unlock(arch_rwlock_t *lock)
10901 {
10902 unsigned long tmp1, tmp2;
10903
10904 __asm__ __volatile__(
10905 "1: lduw [%2], %0\n"
10906-" sub %0, 1, %1\n"
10907+" subcc %0, 1, %1\n"
10908+
10909+#ifdef CONFIG_PAX_REFCOUNT
10910+" tvs %%icc, 6\n"
10911+#endif
10912+
10913 " cas [%2], %0, %1\n"
10914 " cmp %0, %1\n"
10915 " bne,pn %%xcc, 1b\n"
10916@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10917 : "memory");
10918 }
10919
10920-static void inline arch_write_lock(arch_rwlock_t *lock)
10921+static inline void arch_write_lock(arch_rwlock_t *lock)
10922 {
10923 unsigned long mask, tmp1, tmp2;
10924
10925@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10926 : "memory");
10927 }
10928
10929-static void inline arch_write_unlock(arch_rwlock_t *lock)
10930+static inline void arch_write_unlock(arch_rwlock_t *lock)
10931 {
10932 __asm__ __volatile__(
10933 " stw %%g0, [%0]"
10934@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10935 : "memory");
10936 }
10937
10938-static int inline arch_write_trylock(arch_rwlock_t *lock)
10939+static inline int arch_write_trylock(arch_rwlock_t *lock)
10940 {
10941 unsigned long mask, tmp1, tmp2, result;
10942
10943diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10944index 96efa7a..16858bf 100644
10945--- a/arch/sparc/include/asm/thread_info_32.h
10946+++ b/arch/sparc/include/asm/thread_info_32.h
10947@@ -49,6 +49,8 @@ struct thread_info {
10948 unsigned long w_saved;
10949
10950 struct restart_block restart_block;
10951+
10952+ unsigned long lowest_stack;
10953 };
10954
10955 /*
10956diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10957index cc6275c..7eb8e21 100644
10958--- a/arch/sparc/include/asm/thread_info_64.h
10959+++ b/arch/sparc/include/asm/thread_info_64.h
10960@@ -63,6 +63,8 @@ struct thread_info {
10961 struct pt_regs *kern_una_regs;
10962 unsigned int kern_una_insn;
10963
10964+ unsigned long lowest_stack;
10965+
10966 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10967 __attribute__ ((aligned(64)));
10968 };
10969@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10970 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10971 /* flag bit 4 is available */
10972 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10973-/* flag bit 6 is available */
10974+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10975 #define TIF_32BIT 7 /* 32-bit binary */
10976 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10977 #define TIF_SECCOMP 9 /* secure computing */
10978 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10979 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10980+
10981 /* NOTE: Thread flags >= 12 should be ones we have no interest
10982 * in using in assembly, else we can't use the mask as
10983 * an immediate value in instructions such as andcc.
10984@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10985 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10986 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10987 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10988+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10989
10990 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10991 _TIF_DO_NOTIFY_RESUME_MASK | \
10992 _TIF_NEED_RESCHED)
10993 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10994
10995+#define _TIF_WORK_SYSCALL \
10996+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10997+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10998+
10999+
11000 /*
11001 * Thread-synchronous status.
11002 *
11003diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
11004index bd56c28..4b63d83 100644
11005--- a/arch/sparc/include/asm/uaccess.h
11006+++ b/arch/sparc/include/asm/uaccess.h
11007@@ -1,5 +1,6 @@
11008 #ifndef ___ASM_SPARC_UACCESS_H
11009 #define ___ASM_SPARC_UACCESS_H
11010+
11011 #if defined(__sparc__) && defined(__arch64__)
11012 #include <asm/uaccess_64.h>
11013 #else
11014diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
11015index 9634d08..f55fe4f 100644
11016--- a/arch/sparc/include/asm/uaccess_32.h
11017+++ b/arch/sparc/include/asm/uaccess_32.h
11018@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
11019
11020 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
11021 {
11022- if (n && __access_ok((unsigned long) to, n))
11023+ if ((long)n < 0)
11024+ return n;
11025+
11026+ if (n && __access_ok((unsigned long) to, n)) {
11027+ if (!__builtin_constant_p(n))
11028+ check_object_size(from, n, true);
11029 return __copy_user(to, (__force void __user *) from, n);
11030- else
11031+ } else
11032 return n;
11033 }
11034
11035 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
11036 {
11037+ if ((long)n < 0)
11038+ return n;
11039+
11040+ if (!__builtin_constant_p(n))
11041+ check_object_size(from, n, true);
11042+
11043 return __copy_user(to, (__force void __user *) from, n);
11044 }
11045
11046 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
11047 {
11048- if (n && __access_ok((unsigned long) from, n))
11049+ if ((long)n < 0)
11050+ return n;
11051+
11052+ if (n && __access_ok((unsigned long) from, n)) {
11053+ if (!__builtin_constant_p(n))
11054+ check_object_size(to, n, false);
11055 return __copy_user((__force void __user *) to, from, n);
11056- else
11057+ } else
11058 return n;
11059 }
11060
11061 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
11062 {
11063+ if ((long)n < 0)
11064+ return n;
11065+
11066 return __copy_user((__force void __user *) to, from, n);
11067 }
11068
11069diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
11070index c990a5e..f17b9c1 100644
11071--- a/arch/sparc/include/asm/uaccess_64.h
11072+++ b/arch/sparc/include/asm/uaccess_64.h
11073@@ -10,6 +10,7 @@
11074 #include <linux/compiler.h>
11075 #include <linux/string.h>
11076 #include <linux/thread_info.h>
11077+#include <linux/kernel.h>
11078 #include <asm/asi.h>
11079 #include <asm/spitfire.h>
11080 #include <asm-generic/uaccess-unaligned.h>
11081@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
11082 static inline unsigned long __must_check
11083 copy_from_user(void *to, const void __user *from, unsigned long size)
11084 {
11085- unsigned long ret = ___copy_from_user(to, from, size);
11086+ unsigned long ret;
11087
11088+ if ((long)size < 0 || size > INT_MAX)
11089+ return size;
11090+
11091+ if (!__builtin_constant_p(size))
11092+ check_object_size(to, size, false);
11093+
11094+ ret = ___copy_from_user(to, from, size);
11095 if (unlikely(ret))
11096 ret = copy_from_user_fixup(to, from, size);
11097
11098@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
11099 static inline unsigned long __must_check
11100 copy_to_user(void __user *to, const void *from, unsigned long size)
11101 {
11102- unsigned long ret = ___copy_to_user(to, from, size);
11103+ unsigned long ret;
11104
11105+ if ((long)size < 0 || size > INT_MAX)
11106+ return size;
11107+
11108+ if (!__builtin_constant_p(size))
11109+ check_object_size(from, size, true);
11110+
11111+ ret = ___copy_to_user(to, from, size);
11112 if (unlikely(ret))
11113 ret = copy_to_user_fixup(to, from, size);
11114 return ret;
11115diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
11116index 7cf9c6e..6206648 100644
11117--- a/arch/sparc/kernel/Makefile
11118+++ b/arch/sparc/kernel/Makefile
11119@@ -4,7 +4,7 @@
11120 #
11121
11122 asflags-y := -ansi
11123-ccflags-y := -Werror
11124+#ccflags-y := -Werror
11125
11126 extra-y := head_$(BITS).o
11127
11128diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
11129index 50e7b62..79fae35 100644
11130--- a/arch/sparc/kernel/process_32.c
11131+++ b/arch/sparc/kernel/process_32.c
11132@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
11133
11134 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
11135 r->psr, r->pc, r->npc, r->y, print_tainted());
11136- printk("PC: <%pS>\n", (void *) r->pc);
11137+ printk("PC: <%pA>\n", (void *) r->pc);
11138 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11139 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
11140 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
11141 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11142 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
11143 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
11144- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
11145+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
11146
11147 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11148 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
11149@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11150 rw = (struct reg_window32 *) fp;
11151 pc = rw->ins[7];
11152 printk("[%08lx : ", pc);
11153- printk("%pS ] ", (void *) pc);
11154+ printk("%pA ] ", (void *) pc);
11155 fp = rw->ins[6];
11156 } while (++count < 16);
11157 printk("\n");
11158diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
11159index 0be7bf9..2b1cba8 100644
11160--- a/arch/sparc/kernel/process_64.c
11161+++ b/arch/sparc/kernel/process_64.c
11162@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
11163 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
11164 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
11165 if (regs->tstate & TSTATE_PRIV)
11166- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
11167+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
11168 }
11169
11170 void show_regs(struct pt_regs *regs)
11171@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
11172
11173 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
11174 regs->tpc, regs->tnpc, regs->y, print_tainted());
11175- printk("TPC: <%pS>\n", (void *) regs->tpc);
11176+ printk("TPC: <%pA>\n", (void *) regs->tpc);
11177 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
11178 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
11179 regs->u_regs[3]);
11180@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
11181 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
11182 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
11183 regs->u_regs[15]);
11184- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
11185+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
11186 show_regwindow(regs);
11187 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
11188 }
11189@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
11190 ((tp && tp->task) ? tp->task->pid : -1));
11191
11192 if (gp->tstate & TSTATE_PRIV) {
11193- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
11194+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
11195 (void *) gp->tpc,
11196 (void *) gp->o7,
11197 (void *) gp->i7,
11198diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
11199index 79cc0d1..ec62734 100644
11200--- a/arch/sparc/kernel/prom_common.c
11201+++ b/arch/sparc/kernel/prom_common.c
11202@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
11203
11204 unsigned int prom_early_allocated __initdata;
11205
11206-static struct of_pdt_ops prom_sparc_ops __initdata = {
11207+static struct of_pdt_ops prom_sparc_ops __initconst = {
11208 .nextprop = prom_common_nextprop,
11209 .getproplen = prom_getproplen,
11210 .getproperty = prom_getproperty,
11211diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
11212index c13c9f2..d572c34 100644
11213--- a/arch/sparc/kernel/ptrace_64.c
11214+++ b/arch/sparc/kernel/ptrace_64.c
11215@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
11216 return ret;
11217 }
11218
11219+#ifdef CONFIG_GRKERNSEC_SETXID
11220+extern void gr_delayed_cred_worker(void);
11221+#endif
11222+
11223 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11224 {
11225 int ret = 0;
11226@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11227 if (test_thread_flag(TIF_NOHZ))
11228 user_exit();
11229
11230+#ifdef CONFIG_GRKERNSEC_SETXID
11231+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11232+ gr_delayed_cred_worker();
11233+#endif
11234+
11235 if (test_thread_flag(TIF_SYSCALL_TRACE))
11236 ret = tracehook_report_syscall_entry(regs);
11237
11238@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
11239 if (test_thread_flag(TIF_NOHZ))
11240 user_exit();
11241
11242+#ifdef CONFIG_GRKERNSEC_SETXID
11243+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11244+ gr_delayed_cred_worker();
11245+#endif
11246+
11247 audit_syscall_exit(regs);
11248
11249 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
11250diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
11251index c9300bf..b2080cf 100644
11252--- a/arch/sparc/kernel/smp_64.c
11253+++ b/arch/sparc/kernel/smp_64.c
11254@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11255 return;
11256
11257 #ifdef CONFIG_DEBUG_DCFLUSH
11258- atomic_inc(&dcpage_flushes);
11259+ atomic_inc_unchecked(&dcpage_flushes);
11260 #endif
11261
11262 this_cpu = get_cpu();
11263@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11264 xcall_deliver(data0, __pa(pg_addr),
11265 (u64) pg_addr, cpumask_of(cpu));
11266 #ifdef CONFIG_DEBUG_DCFLUSH
11267- atomic_inc(&dcpage_flushes_xcall);
11268+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11269 #endif
11270 }
11271 }
11272@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11273 preempt_disable();
11274
11275 #ifdef CONFIG_DEBUG_DCFLUSH
11276- atomic_inc(&dcpage_flushes);
11277+ atomic_inc_unchecked(&dcpage_flushes);
11278 #endif
11279 data0 = 0;
11280 pg_addr = page_address(page);
11281@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11282 xcall_deliver(data0, __pa(pg_addr),
11283 (u64) pg_addr, cpu_online_mask);
11284 #ifdef CONFIG_DEBUG_DCFLUSH
11285- atomic_inc(&dcpage_flushes_xcall);
11286+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11287 #endif
11288 }
11289 __local_flush_dcache_page(page);
11290diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
11291index 646988d..b88905f 100644
11292--- a/arch/sparc/kernel/sys_sparc_32.c
11293+++ b/arch/sparc/kernel/sys_sparc_32.c
11294@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11295 if (len > TASK_SIZE - PAGE_SIZE)
11296 return -ENOMEM;
11297 if (!addr)
11298- addr = TASK_UNMAPPED_BASE;
11299+ addr = current->mm->mmap_base;
11300
11301 info.flags = 0;
11302 info.length = len;
11303diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
11304index c85403d..6af95c9 100644
11305--- a/arch/sparc/kernel/sys_sparc_64.c
11306+++ b/arch/sparc/kernel/sys_sparc_64.c
11307@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11308 struct vm_area_struct * vma;
11309 unsigned long task_size = TASK_SIZE;
11310 int do_color_align;
11311+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11312 struct vm_unmapped_area_info info;
11313
11314 if (flags & MAP_FIXED) {
11315 /* We do not accept a shared mapping if it would violate
11316 * cache aliasing constraints.
11317 */
11318- if ((flags & MAP_SHARED) &&
11319+ if ((filp || (flags & MAP_SHARED)) &&
11320 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11321 return -EINVAL;
11322 return addr;
11323@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11324 if (filp || (flags & MAP_SHARED))
11325 do_color_align = 1;
11326
11327+#ifdef CONFIG_PAX_RANDMMAP
11328+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11329+#endif
11330+
11331 if (addr) {
11332 if (do_color_align)
11333 addr = COLOR_ALIGN(addr, pgoff);
11334@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11335 addr = PAGE_ALIGN(addr);
11336
11337 vma = find_vma(mm, addr);
11338- if (task_size - len >= addr &&
11339- (!vma || addr + len <= vma->vm_start))
11340+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11341 return addr;
11342 }
11343
11344 info.flags = 0;
11345 info.length = len;
11346- info.low_limit = TASK_UNMAPPED_BASE;
11347+ info.low_limit = mm->mmap_base;
11348 info.high_limit = min(task_size, VA_EXCLUDE_START);
11349 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11350 info.align_offset = pgoff << PAGE_SHIFT;
11351+ info.threadstack_offset = offset;
11352 addr = vm_unmapped_area(&info);
11353
11354 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11355 VM_BUG_ON(addr != -ENOMEM);
11356 info.low_limit = VA_EXCLUDE_END;
11357+
11358+#ifdef CONFIG_PAX_RANDMMAP
11359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11360+ info.low_limit += mm->delta_mmap;
11361+#endif
11362+
11363 info.high_limit = task_size;
11364 addr = vm_unmapped_area(&info);
11365 }
11366@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11367 unsigned long task_size = STACK_TOP32;
11368 unsigned long addr = addr0;
11369 int do_color_align;
11370+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11371 struct vm_unmapped_area_info info;
11372
11373 /* This should only ever run for 32-bit processes. */
11374@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11375 /* We do not accept a shared mapping if it would violate
11376 * cache aliasing constraints.
11377 */
11378- if ((flags & MAP_SHARED) &&
11379+ if ((filp || (flags & MAP_SHARED)) &&
11380 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11381 return -EINVAL;
11382 return addr;
11383@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11384 if (filp || (flags & MAP_SHARED))
11385 do_color_align = 1;
11386
11387+#ifdef CONFIG_PAX_RANDMMAP
11388+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11389+#endif
11390+
11391 /* requesting a specific address */
11392 if (addr) {
11393 if (do_color_align)
11394@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11395 addr = PAGE_ALIGN(addr);
11396
11397 vma = find_vma(mm, addr);
11398- if (task_size - len >= addr &&
11399- (!vma || addr + len <= vma->vm_start))
11400+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11401 return addr;
11402 }
11403
11404@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11405 info.high_limit = mm->mmap_base;
11406 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11407 info.align_offset = pgoff << PAGE_SHIFT;
11408+ info.threadstack_offset = offset;
11409 addr = vm_unmapped_area(&info);
11410
11411 /*
11412@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11413 VM_BUG_ON(addr != -ENOMEM);
11414 info.flags = 0;
11415 info.low_limit = TASK_UNMAPPED_BASE;
11416+
11417+#ifdef CONFIG_PAX_RANDMMAP
11418+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11419+ info.low_limit += mm->delta_mmap;
11420+#endif
11421+
11422 info.high_limit = STACK_TOP32;
11423 addr = vm_unmapped_area(&info);
11424 }
11425@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
11426 EXPORT_SYMBOL(get_fb_unmapped_area);
11427
11428 /* Essentially the same as PowerPC. */
11429-static unsigned long mmap_rnd(void)
11430+static unsigned long mmap_rnd(struct mm_struct *mm)
11431 {
11432 unsigned long rnd = 0UL;
11433
11434+#ifdef CONFIG_PAX_RANDMMAP
11435+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11436+#endif
11437+
11438 if (current->flags & PF_RANDOMIZE) {
11439 unsigned long val = get_random_int();
11440 if (test_thread_flag(TIF_32BIT))
11441@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11442
11443 void arch_pick_mmap_layout(struct mm_struct *mm)
11444 {
11445- unsigned long random_factor = mmap_rnd();
11446+ unsigned long random_factor = mmap_rnd(mm);
11447 unsigned long gap;
11448
11449 /*
11450@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11451 gap == RLIM_INFINITY ||
11452 sysctl_legacy_va_layout) {
11453 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11454+
11455+#ifdef CONFIG_PAX_RANDMMAP
11456+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11457+ mm->mmap_base += mm->delta_mmap;
11458+#endif
11459+
11460 mm->get_unmapped_area = arch_get_unmapped_area;
11461 } else {
11462 /* We know it's 32-bit */
11463@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11464 gap = (task_size / 6 * 5);
11465
11466 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11467+
11468+#ifdef CONFIG_PAX_RANDMMAP
11469+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11470+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11471+#endif
11472+
11473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11474 }
11475 }
11476diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11477index 33a17e7..d87fb1f 100644
11478--- a/arch/sparc/kernel/syscalls.S
11479+++ b/arch/sparc/kernel/syscalls.S
11480@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11481 #endif
11482 .align 32
11483 1: ldx [%g6 + TI_FLAGS], %l5
11484- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11485+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11486 be,pt %icc, rtrap
11487 nop
11488 call syscall_trace_leave
11489@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11490
11491 srl %i3, 0, %o3 ! IEU0
11492 srl %i2, 0, %o2 ! IEU0 Group
11493- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11494+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11495 bne,pn %icc, linux_syscall_trace32 ! CTI
11496 mov %i0, %l5 ! IEU1
11497 5: call %l7 ! CTI Group brk forced
11498@@ -208,7 +208,7 @@ linux_sparc_syscall:
11499
11500 mov %i3, %o3 ! IEU1
11501 mov %i4, %o4 ! IEU0 Group
11502- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11503+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11504 bne,pn %icc, linux_syscall_trace ! CTI Group
11505 mov %i0, %l5 ! IEU0
11506 2: call %l7 ! CTI Group brk forced
11507@@ -223,7 +223,7 @@ ret_sys_call:
11508
11509 cmp %o0, -ERESTART_RESTARTBLOCK
11510 bgeu,pn %xcc, 1f
11511- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11512+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11513 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11514
11515 2:
11516diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11517index 6fd386c5..6907d81 100644
11518--- a/arch/sparc/kernel/traps_32.c
11519+++ b/arch/sparc/kernel/traps_32.c
11520@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11521 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11522 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11523
11524+extern void gr_handle_kernel_exploit(void);
11525+
11526 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11527 {
11528 static int die_counter;
11529@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11530 count++ < 30 &&
11531 (((unsigned long) rw) >= PAGE_OFFSET) &&
11532 !(((unsigned long) rw) & 0x7)) {
11533- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11534+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11535 (void *) rw->ins[7]);
11536 rw = (struct reg_window32 *)rw->ins[6];
11537 }
11538 }
11539 printk("Instruction DUMP:");
11540 instruction_dump ((unsigned long *) regs->pc);
11541- if(regs->psr & PSR_PS)
11542+ if(regs->psr & PSR_PS) {
11543+ gr_handle_kernel_exploit();
11544 do_exit(SIGKILL);
11545+ }
11546 do_exit(SIGSEGV);
11547 }
11548
11549diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11550index 981a769..d906eda 100644
11551--- a/arch/sparc/kernel/traps_64.c
11552+++ b/arch/sparc/kernel/traps_64.c
11553@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11554 i + 1,
11555 p->trapstack[i].tstate, p->trapstack[i].tpc,
11556 p->trapstack[i].tnpc, p->trapstack[i].tt);
11557- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11558+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11559 }
11560 }
11561
11562@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11563
11564 lvl -= 0x100;
11565 if (regs->tstate & TSTATE_PRIV) {
11566+
11567+#ifdef CONFIG_PAX_REFCOUNT
11568+ if (lvl == 6)
11569+ pax_report_refcount_overflow(regs);
11570+#endif
11571+
11572 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11573 die_if_kernel(buffer, regs);
11574 }
11575@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11576 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11577 {
11578 char buffer[32];
11579-
11580+
11581 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11582 0, lvl, SIGTRAP) == NOTIFY_STOP)
11583 return;
11584
11585+#ifdef CONFIG_PAX_REFCOUNT
11586+ if (lvl == 6)
11587+ pax_report_refcount_overflow(regs);
11588+#endif
11589+
11590 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11591
11592 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11593@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11594 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11595 printk("%s" "ERROR(%d): ",
11596 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11597- printk("TPC<%pS>\n", (void *) regs->tpc);
11598+ printk("TPC<%pA>\n", (void *) regs->tpc);
11599 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11600 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11601 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11602@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11603 smp_processor_id(),
11604 (type & 0x1) ? 'I' : 'D',
11605 regs->tpc);
11606- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11607+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11608 panic("Irrecoverable Cheetah+ parity error.");
11609 }
11610
11611@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11612 smp_processor_id(),
11613 (type & 0x1) ? 'I' : 'D',
11614 regs->tpc);
11615- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11616+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11617 }
11618
11619 struct sun4v_error_entry {
11620@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11621 /*0x38*/u64 reserved_5;
11622 };
11623
11624-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11625-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11626+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11627+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11628
11629 static const char *sun4v_err_type_to_str(u8 type)
11630 {
11631@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11632 }
11633
11634 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11635- int cpu, const char *pfx, atomic_t *ocnt)
11636+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11637 {
11638 u64 *raw_ptr = (u64 *) ent;
11639 u32 attrs;
11640@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11641
11642 show_regs(regs);
11643
11644- if ((cnt = atomic_read(ocnt)) != 0) {
11645- atomic_set(ocnt, 0);
11646+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11647+ atomic_set_unchecked(ocnt, 0);
11648 wmb();
11649 printk("%s: Queue overflowed %d times.\n",
11650 pfx, cnt);
11651@@ -2048,7 +2059,7 @@ out:
11652 */
11653 void sun4v_resum_overflow(struct pt_regs *regs)
11654 {
11655- atomic_inc(&sun4v_resum_oflow_cnt);
11656+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11657 }
11658
11659 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11660@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11661 /* XXX Actually even this can make not that much sense. Perhaps
11662 * XXX we should just pull the plug and panic directly from here?
11663 */
11664- atomic_inc(&sun4v_nonresum_oflow_cnt);
11665+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11666 }
11667
11668 static void sun4v_tlb_error(struct pt_regs *regs)
11669@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11670
11671 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11672 regs->tpc, tl);
11673- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11674+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11675 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11676- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11677+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11678 (void *) regs->u_regs[UREG_I7]);
11679 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11680 "pte[%lx] error[%lx]\n",
11681@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11682
11683 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11684 regs->tpc, tl);
11685- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11686+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11687 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11688- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11689+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11690 (void *) regs->u_regs[UREG_I7]);
11691 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11692 "pte[%lx] error[%lx]\n",
11693@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11694 fp = (unsigned long)sf->fp + STACK_BIAS;
11695 }
11696
11697- printk(" [%016lx] %pS\n", pc, (void *) pc);
11698+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11699 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11700 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11701 int index = tsk->curr_ret_stack;
11702 if (tsk->ret_stack && index >= graph) {
11703 pc = tsk->ret_stack[index - graph].ret;
11704- printk(" [%016lx] %pS\n", pc, (void *) pc);
11705+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11706 graph++;
11707 }
11708 }
11709@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11710 return (struct reg_window *) (fp + STACK_BIAS);
11711 }
11712
11713+extern void gr_handle_kernel_exploit(void);
11714+
11715 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11716 {
11717 static int die_counter;
11718@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11719 while (rw &&
11720 count++ < 30 &&
11721 kstack_valid(tp, (unsigned long) rw)) {
11722- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11723+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11724 (void *) rw->ins[7]);
11725
11726 rw = kernel_stack_up(rw);
11727@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11728 }
11729 user_instruction_dump ((unsigned int __user *) regs->tpc);
11730 }
11731- if (regs->tstate & TSTATE_PRIV)
11732+ if (regs->tstate & TSTATE_PRIV) {
11733+ gr_handle_kernel_exploit();
11734 do_exit(SIGKILL);
11735+ }
11736 do_exit(SIGSEGV);
11737 }
11738 EXPORT_SYMBOL(die_if_kernel);
11739diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11740index 62098a8..547ab2c 100644
11741--- a/arch/sparc/kernel/unaligned_64.c
11742+++ b/arch/sparc/kernel/unaligned_64.c
11743@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11744 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11745
11746 if (__ratelimit(&ratelimit)) {
11747- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11748+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11749 regs->tpc, (void *) regs->tpc);
11750 }
11751 }
11752diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11753index 3269b02..64f5231 100644
11754--- a/arch/sparc/lib/Makefile
11755+++ b/arch/sparc/lib/Makefile
11756@@ -2,7 +2,7 @@
11757 #
11758
11759 asflags-y := -ansi -DST_DIV0=0x02
11760-ccflags-y := -Werror
11761+#ccflags-y := -Werror
11762
11763 lib-$(CONFIG_SPARC32) += ashrdi3.o
11764 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11765diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11766index 85c233d..68500e0 100644
11767--- a/arch/sparc/lib/atomic_64.S
11768+++ b/arch/sparc/lib/atomic_64.S
11769@@ -17,7 +17,12 @@
11770 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11771 BACKOFF_SETUP(%o2)
11772 1: lduw [%o1], %g1
11773- add %g1, %o0, %g7
11774+ addcc %g1, %o0, %g7
11775+
11776+#ifdef CONFIG_PAX_REFCOUNT
11777+ tvs %icc, 6
11778+#endif
11779+
11780 cas [%o1], %g1, %g7
11781 cmp %g1, %g7
11782 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11783@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11784 2: BACKOFF_SPIN(%o2, %o3, 1b)
11785 ENDPROC(atomic_add)
11786
11787+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11788+ BACKOFF_SETUP(%o2)
11789+1: lduw [%o1], %g1
11790+ add %g1, %o0, %g7
11791+ cas [%o1], %g1, %g7
11792+ cmp %g1, %g7
11793+ bne,pn %icc, 2f
11794+ nop
11795+ retl
11796+ nop
11797+2: BACKOFF_SPIN(%o2, %o3, 1b)
11798+ENDPROC(atomic_add_unchecked)
11799+
11800 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11801 BACKOFF_SETUP(%o2)
11802 1: lduw [%o1], %g1
11803- sub %g1, %o0, %g7
11804+ subcc %g1, %o0, %g7
11805+
11806+#ifdef CONFIG_PAX_REFCOUNT
11807+ tvs %icc, 6
11808+#endif
11809+
11810 cas [%o1], %g1, %g7
11811 cmp %g1, %g7
11812 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11813@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11814 2: BACKOFF_SPIN(%o2, %o3, 1b)
11815 ENDPROC(atomic_sub)
11816
11817+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11818+ BACKOFF_SETUP(%o2)
11819+1: lduw [%o1], %g1
11820+ sub %g1, %o0, %g7
11821+ cas [%o1], %g1, %g7
11822+ cmp %g1, %g7
11823+ bne,pn %icc, 2f
11824+ nop
11825+ retl
11826+ nop
11827+2: BACKOFF_SPIN(%o2, %o3, 1b)
11828+ENDPROC(atomic_sub_unchecked)
11829+
11830 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11831 BACKOFF_SETUP(%o2)
11832 1: lduw [%o1], %g1
11833- add %g1, %o0, %g7
11834+ addcc %g1, %o0, %g7
11835+
11836+#ifdef CONFIG_PAX_REFCOUNT
11837+ tvs %icc, 6
11838+#endif
11839+
11840 cas [%o1], %g1, %g7
11841 cmp %g1, %g7
11842 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11843@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11844 2: BACKOFF_SPIN(%o2, %o3, 1b)
11845 ENDPROC(atomic_add_ret)
11846
11847+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11848+ BACKOFF_SETUP(%o2)
11849+1: lduw [%o1], %g1
11850+ addcc %g1, %o0, %g7
11851+ cas [%o1], %g1, %g7
11852+ cmp %g1, %g7
11853+ bne,pn %icc, 2f
11854+ add %g7, %o0, %g7
11855+ sra %g7, 0, %o0
11856+ retl
11857+ nop
11858+2: BACKOFF_SPIN(%o2, %o3, 1b)
11859+ENDPROC(atomic_add_ret_unchecked)
11860+
11861 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11862 BACKOFF_SETUP(%o2)
11863 1: lduw [%o1], %g1
11864- sub %g1, %o0, %g7
11865+ subcc %g1, %o0, %g7
11866+
11867+#ifdef CONFIG_PAX_REFCOUNT
11868+ tvs %icc, 6
11869+#endif
11870+
11871 cas [%o1], %g1, %g7
11872 cmp %g1, %g7
11873 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11874@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11875 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11876 BACKOFF_SETUP(%o2)
11877 1: ldx [%o1], %g1
11878- add %g1, %o0, %g7
11879+ addcc %g1, %o0, %g7
11880+
11881+#ifdef CONFIG_PAX_REFCOUNT
11882+ tvs %xcc, 6
11883+#endif
11884+
11885 casx [%o1], %g1, %g7
11886 cmp %g1, %g7
11887 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11888@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11889 2: BACKOFF_SPIN(%o2, %o3, 1b)
11890 ENDPROC(atomic64_add)
11891
11892+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11893+ BACKOFF_SETUP(%o2)
11894+1: ldx [%o1], %g1
11895+ addcc %g1, %o0, %g7
11896+ casx [%o1], %g1, %g7
11897+ cmp %g1, %g7
11898+ bne,pn %xcc, 2f
11899+ nop
11900+ retl
11901+ nop
11902+2: BACKOFF_SPIN(%o2, %o3, 1b)
11903+ENDPROC(atomic64_add_unchecked)
11904+
11905 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11906 BACKOFF_SETUP(%o2)
11907 1: ldx [%o1], %g1
11908- sub %g1, %o0, %g7
11909+ subcc %g1, %o0, %g7
11910+
11911+#ifdef CONFIG_PAX_REFCOUNT
11912+ tvs %xcc, 6
11913+#endif
11914+
11915 casx [%o1], %g1, %g7
11916 cmp %g1, %g7
11917 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11918@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11919 2: BACKOFF_SPIN(%o2, %o3, 1b)
11920 ENDPROC(atomic64_sub)
11921
11922+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11923+ BACKOFF_SETUP(%o2)
11924+1: ldx [%o1], %g1
11925+ subcc %g1, %o0, %g7
11926+ casx [%o1], %g1, %g7
11927+ cmp %g1, %g7
11928+ bne,pn %xcc, 2f
11929+ nop
11930+ retl
11931+ nop
11932+2: BACKOFF_SPIN(%o2, %o3, 1b)
11933+ENDPROC(atomic64_sub_unchecked)
11934+
11935 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11936 BACKOFF_SETUP(%o2)
11937 1: ldx [%o1], %g1
11938- add %g1, %o0, %g7
11939+ addcc %g1, %o0, %g7
11940+
11941+#ifdef CONFIG_PAX_REFCOUNT
11942+ tvs %xcc, 6
11943+#endif
11944+
11945 casx [%o1], %g1, %g7
11946 cmp %g1, %g7
11947 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11948@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11949 2: BACKOFF_SPIN(%o2, %o3, 1b)
11950 ENDPROC(atomic64_add_ret)
11951
11952+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11953+ BACKOFF_SETUP(%o2)
11954+1: ldx [%o1], %g1
11955+ addcc %g1, %o0, %g7
11956+ casx [%o1], %g1, %g7
11957+ cmp %g1, %g7
11958+ bne,pn %xcc, 2f
11959+ add %g7, %o0, %g7
11960+ mov %g7, %o0
11961+ retl
11962+ nop
11963+2: BACKOFF_SPIN(%o2, %o3, 1b)
11964+ENDPROC(atomic64_add_ret_unchecked)
11965+
11966 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11967 BACKOFF_SETUP(%o2)
11968 1: ldx [%o1], %g1
11969- sub %g1, %o0, %g7
11970+ subcc %g1, %o0, %g7
11971+
11972+#ifdef CONFIG_PAX_REFCOUNT
11973+ tvs %xcc, 6
11974+#endif
11975+
11976 casx [%o1], %g1, %g7
11977 cmp %g1, %g7
11978 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11979diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11980index 323335b..ed85ea2 100644
11981--- a/arch/sparc/lib/ksyms.c
11982+++ b/arch/sparc/lib/ksyms.c
11983@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11984
11985 /* Atomic counter implementation. */
11986 EXPORT_SYMBOL(atomic_add);
11987+EXPORT_SYMBOL(atomic_add_unchecked);
11988 EXPORT_SYMBOL(atomic_add_ret);
11989+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11990 EXPORT_SYMBOL(atomic_sub);
11991+EXPORT_SYMBOL(atomic_sub_unchecked);
11992 EXPORT_SYMBOL(atomic_sub_ret);
11993 EXPORT_SYMBOL(atomic64_add);
11994+EXPORT_SYMBOL(atomic64_add_unchecked);
11995 EXPORT_SYMBOL(atomic64_add_ret);
11996+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11997 EXPORT_SYMBOL(atomic64_sub);
11998+EXPORT_SYMBOL(atomic64_sub_unchecked);
11999 EXPORT_SYMBOL(atomic64_sub_ret);
12000 EXPORT_SYMBOL(atomic64_dec_if_positive);
12001
12002diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
12003index 30c3ecc..736f015 100644
12004--- a/arch/sparc/mm/Makefile
12005+++ b/arch/sparc/mm/Makefile
12006@@ -2,7 +2,7 @@
12007 #
12008
12009 asflags-y := -ansi
12010-ccflags-y := -Werror
12011+#ccflags-y := -Werror
12012
12013 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
12014 obj-y += fault_$(BITS).o
12015diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
12016index 908e8c1..1524793 100644
12017--- a/arch/sparc/mm/fault_32.c
12018+++ b/arch/sparc/mm/fault_32.c
12019@@ -21,6 +21,9 @@
12020 #include <linux/perf_event.h>
12021 #include <linux/interrupt.h>
12022 #include <linux/kdebug.h>
12023+#include <linux/slab.h>
12024+#include <linux/pagemap.h>
12025+#include <linux/compiler.h>
12026
12027 #include <asm/page.h>
12028 #include <asm/pgtable.h>
12029@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12030 return safe_compute_effective_address(regs, insn);
12031 }
12032
12033+#ifdef CONFIG_PAX_PAGEEXEC
12034+#ifdef CONFIG_PAX_DLRESOLVE
12035+static void pax_emuplt_close(struct vm_area_struct *vma)
12036+{
12037+ vma->vm_mm->call_dl_resolve = 0UL;
12038+}
12039+
12040+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12041+{
12042+ unsigned int *kaddr;
12043+
12044+ vmf->page = alloc_page(GFP_HIGHUSER);
12045+ if (!vmf->page)
12046+ return VM_FAULT_OOM;
12047+
12048+ kaddr = kmap(vmf->page);
12049+ memset(kaddr, 0, PAGE_SIZE);
12050+ kaddr[0] = 0x9DE3BFA8U; /* save */
12051+ flush_dcache_page(vmf->page);
12052+ kunmap(vmf->page);
12053+ return VM_FAULT_MAJOR;
12054+}
12055+
12056+static const struct vm_operations_struct pax_vm_ops = {
12057+ .close = pax_emuplt_close,
12058+ .fault = pax_emuplt_fault
12059+};
12060+
12061+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12062+{
12063+ int ret;
12064+
12065+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12066+ vma->vm_mm = current->mm;
12067+ vma->vm_start = addr;
12068+ vma->vm_end = addr + PAGE_SIZE;
12069+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12070+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12071+ vma->vm_ops = &pax_vm_ops;
12072+
12073+ ret = insert_vm_struct(current->mm, vma);
12074+ if (ret)
12075+ return ret;
12076+
12077+ ++current->mm->total_vm;
12078+ return 0;
12079+}
12080+#endif
12081+
12082+/*
12083+ * PaX: decide what to do with offenders (regs->pc = fault address)
12084+ *
12085+ * returns 1 when task should be killed
12086+ * 2 when patched PLT trampoline was detected
12087+ * 3 when unpatched PLT trampoline was detected
12088+ */
12089+static int pax_handle_fetch_fault(struct pt_regs *regs)
12090+{
12091+
12092+#ifdef CONFIG_PAX_EMUPLT
12093+ int err;
12094+
12095+ do { /* PaX: patched PLT emulation #1 */
12096+ unsigned int sethi1, sethi2, jmpl;
12097+
12098+ err = get_user(sethi1, (unsigned int *)regs->pc);
12099+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
12100+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
12101+
12102+ if (err)
12103+ break;
12104+
12105+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12106+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12107+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12108+ {
12109+ unsigned int addr;
12110+
12111+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12112+ addr = regs->u_regs[UREG_G1];
12113+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12114+ regs->pc = addr;
12115+ regs->npc = addr+4;
12116+ return 2;
12117+ }
12118+ } while (0);
12119+
12120+ do { /* PaX: patched PLT emulation #2 */
12121+ unsigned int ba;
12122+
12123+ err = get_user(ba, (unsigned int *)regs->pc);
12124+
12125+ if (err)
12126+ break;
12127+
12128+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12129+ unsigned int addr;
12130+
12131+ if ((ba & 0xFFC00000U) == 0x30800000U)
12132+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12133+ else
12134+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12135+ regs->pc = addr;
12136+ regs->npc = addr+4;
12137+ return 2;
12138+ }
12139+ } while (0);
12140+
12141+ do { /* PaX: patched PLT emulation #3 */
12142+ unsigned int sethi, bajmpl, nop;
12143+
12144+ err = get_user(sethi, (unsigned int *)regs->pc);
12145+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
12146+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12147+
12148+ if (err)
12149+ break;
12150+
12151+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12152+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12153+ nop == 0x01000000U)
12154+ {
12155+ unsigned int addr;
12156+
12157+ addr = (sethi & 0x003FFFFFU) << 10;
12158+ regs->u_regs[UREG_G1] = addr;
12159+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12160+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12161+ else
12162+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12163+ regs->pc = addr;
12164+ regs->npc = addr+4;
12165+ return 2;
12166+ }
12167+ } while (0);
12168+
12169+ do { /* PaX: unpatched PLT emulation step 1 */
12170+ unsigned int sethi, ba, nop;
12171+
12172+ err = get_user(sethi, (unsigned int *)regs->pc);
12173+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
12174+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12175+
12176+ if (err)
12177+ break;
12178+
12179+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12180+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12181+ nop == 0x01000000U)
12182+ {
12183+ unsigned int addr, save, call;
12184+
12185+ if ((ba & 0xFFC00000U) == 0x30800000U)
12186+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12187+ else
12188+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12189+
12190+ err = get_user(save, (unsigned int *)addr);
12191+ err |= get_user(call, (unsigned int *)(addr+4));
12192+ err |= get_user(nop, (unsigned int *)(addr+8));
12193+ if (err)
12194+ break;
12195+
12196+#ifdef CONFIG_PAX_DLRESOLVE
12197+ if (save == 0x9DE3BFA8U &&
12198+ (call & 0xC0000000U) == 0x40000000U &&
12199+ nop == 0x01000000U)
12200+ {
12201+ struct vm_area_struct *vma;
12202+ unsigned long call_dl_resolve;
12203+
12204+ down_read(&current->mm->mmap_sem);
12205+ call_dl_resolve = current->mm->call_dl_resolve;
12206+ up_read(&current->mm->mmap_sem);
12207+ if (likely(call_dl_resolve))
12208+ goto emulate;
12209+
12210+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12211+
12212+ down_write(&current->mm->mmap_sem);
12213+ if (current->mm->call_dl_resolve) {
12214+ call_dl_resolve = current->mm->call_dl_resolve;
12215+ up_write(&current->mm->mmap_sem);
12216+ if (vma)
12217+ kmem_cache_free(vm_area_cachep, vma);
12218+ goto emulate;
12219+ }
12220+
12221+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12222+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12223+ up_write(&current->mm->mmap_sem);
12224+ if (vma)
12225+ kmem_cache_free(vm_area_cachep, vma);
12226+ return 1;
12227+ }
12228+
12229+ if (pax_insert_vma(vma, call_dl_resolve)) {
12230+ up_write(&current->mm->mmap_sem);
12231+ kmem_cache_free(vm_area_cachep, vma);
12232+ return 1;
12233+ }
12234+
12235+ current->mm->call_dl_resolve = call_dl_resolve;
12236+ up_write(&current->mm->mmap_sem);
12237+
12238+emulate:
12239+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12240+ regs->pc = call_dl_resolve;
12241+ regs->npc = addr+4;
12242+ return 3;
12243+ }
12244+#endif
12245+
12246+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12247+ if ((save & 0xFFC00000U) == 0x05000000U &&
12248+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12249+ nop == 0x01000000U)
12250+ {
12251+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12252+ regs->u_regs[UREG_G2] = addr + 4;
12253+ addr = (save & 0x003FFFFFU) << 10;
12254+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12255+ regs->pc = addr;
12256+ regs->npc = addr+4;
12257+ return 3;
12258+ }
12259+ }
12260+ } while (0);
12261+
12262+ do { /* PaX: unpatched PLT emulation step 2 */
12263+ unsigned int save, call, nop;
12264+
12265+ err = get_user(save, (unsigned int *)(regs->pc-4));
12266+ err |= get_user(call, (unsigned int *)regs->pc);
12267+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
12268+ if (err)
12269+ break;
12270+
12271+ if (save == 0x9DE3BFA8U &&
12272+ (call & 0xC0000000U) == 0x40000000U &&
12273+ nop == 0x01000000U)
12274+ {
12275+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
12276+
12277+ regs->u_regs[UREG_RETPC] = regs->pc;
12278+ regs->pc = dl_resolve;
12279+ regs->npc = dl_resolve+4;
12280+ return 3;
12281+ }
12282+ } while (0);
12283+#endif
12284+
12285+ return 1;
12286+}
12287+
12288+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12289+{
12290+ unsigned long i;
12291+
12292+ printk(KERN_ERR "PAX: bytes at PC: ");
12293+ for (i = 0; i < 8; i++) {
12294+ unsigned int c;
12295+ if (get_user(c, (unsigned int *)pc+i))
12296+ printk(KERN_CONT "???????? ");
12297+ else
12298+ printk(KERN_CONT "%08x ", c);
12299+ }
12300+ printk("\n");
12301+}
12302+#endif
12303+
12304 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
12305 int text_fault)
12306 {
12307@@ -226,6 +500,24 @@ good_area:
12308 if (!(vma->vm_flags & VM_WRITE))
12309 goto bad_area;
12310 } else {
12311+
12312+#ifdef CONFIG_PAX_PAGEEXEC
12313+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
12314+ up_read(&mm->mmap_sem);
12315+ switch (pax_handle_fetch_fault(regs)) {
12316+
12317+#ifdef CONFIG_PAX_EMUPLT
12318+ case 2:
12319+ case 3:
12320+ return;
12321+#endif
12322+
12323+ }
12324+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
12325+ do_group_exit(SIGKILL);
12326+ }
12327+#endif
12328+
12329 /* Allow reads even for write-only mappings */
12330 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
12331 goto bad_area;
12332diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
12333index 18fcd71..e4fe821 100644
12334--- a/arch/sparc/mm/fault_64.c
12335+++ b/arch/sparc/mm/fault_64.c
12336@@ -22,6 +22,9 @@
12337 #include <linux/kdebug.h>
12338 #include <linux/percpu.h>
12339 #include <linux/context_tracking.h>
12340+#include <linux/slab.h>
12341+#include <linux/pagemap.h>
12342+#include <linux/compiler.h>
12343
12344 #include <asm/page.h>
12345 #include <asm/pgtable.h>
12346@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
12347 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
12348 regs->tpc);
12349 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
12350- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
12351+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
12352 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
12353 dump_stack();
12354 unhandled_fault(regs->tpc, current, regs);
12355@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
12356 show_regs(regs);
12357 }
12358
12359+#ifdef CONFIG_PAX_PAGEEXEC
12360+#ifdef CONFIG_PAX_DLRESOLVE
12361+static void pax_emuplt_close(struct vm_area_struct *vma)
12362+{
12363+ vma->vm_mm->call_dl_resolve = 0UL;
12364+}
12365+
12366+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12367+{
12368+ unsigned int *kaddr;
12369+
12370+ vmf->page = alloc_page(GFP_HIGHUSER);
12371+ if (!vmf->page)
12372+ return VM_FAULT_OOM;
12373+
12374+ kaddr = kmap(vmf->page);
12375+ memset(kaddr, 0, PAGE_SIZE);
12376+ kaddr[0] = 0x9DE3BFA8U; /* save */
12377+ flush_dcache_page(vmf->page);
12378+ kunmap(vmf->page);
12379+ return VM_FAULT_MAJOR;
12380+}
12381+
12382+static const struct vm_operations_struct pax_vm_ops = {
12383+ .close = pax_emuplt_close,
12384+ .fault = pax_emuplt_fault
12385+};
12386+
12387+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12388+{
12389+ int ret;
12390+
12391+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12392+ vma->vm_mm = current->mm;
12393+ vma->vm_start = addr;
12394+ vma->vm_end = addr + PAGE_SIZE;
12395+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12396+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12397+ vma->vm_ops = &pax_vm_ops;
12398+
12399+ ret = insert_vm_struct(current->mm, vma);
12400+ if (ret)
12401+ return ret;
12402+
12403+ ++current->mm->total_vm;
12404+ return 0;
12405+}
12406+#endif
12407+
12408+/*
12409+ * PaX: decide what to do with offenders (regs->tpc = fault address)
12410+ *
12411+ * returns 1 when task should be killed
12412+ * 2 when patched PLT trampoline was detected
12413+ * 3 when unpatched PLT trampoline was detected
12414+ */
12415+static int pax_handle_fetch_fault(struct pt_regs *regs)
12416+{
12417+
12418+#ifdef CONFIG_PAX_EMUPLT
12419+ int err;
12420+
12421+ do { /* PaX: patched PLT emulation #1 */
12422+ unsigned int sethi1, sethi2, jmpl;
12423+
12424+ err = get_user(sethi1, (unsigned int *)regs->tpc);
12425+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
12426+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
12427+
12428+ if (err)
12429+ break;
12430+
12431+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12432+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12433+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12434+ {
12435+ unsigned long addr;
12436+
12437+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12438+ addr = regs->u_regs[UREG_G1];
12439+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12440+
12441+ if (test_thread_flag(TIF_32BIT))
12442+ addr &= 0xFFFFFFFFUL;
12443+
12444+ regs->tpc = addr;
12445+ regs->tnpc = addr+4;
12446+ return 2;
12447+ }
12448+ } while (0);
12449+
12450+ do { /* PaX: patched PLT emulation #2 */
12451+ unsigned int ba;
12452+
12453+ err = get_user(ba, (unsigned int *)regs->tpc);
12454+
12455+ if (err)
12456+ break;
12457+
12458+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12459+ unsigned long addr;
12460+
12461+ if ((ba & 0xFFC00000U) == 0x30800000U)
12462+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12463+ else
12464+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12465+
12466+ if (test_thread_flag(TIF_32BIT))
12467+ addr &= 0xFFFFFFFFUL;
12468+
12469+ regs->tpc = addr;
12470+ regs->tnpc = addr+4;
12471+ return 2;
12472+ }
12473+ } while (0);
12474+
12475+ do { /* PaX: patched PLT emulation #3 */
12476+ unsigned int sethi, bajmpl, nop;
12477+
12478+ err = get_user(sethi, (unsigned int *)regs->tpc);
12479+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12480+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12481+
12482+ if (err)
12483+ break;
12484+
12485+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12486+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12487+ nop == 0x01000000U)
12488+ {
12489+ unsigned long addr;
12490+
12491+ addr = (sethi & 0x003FFFFFU) << 10;
12492+ regs->u_regs[UREG_G1] = addr;
12493+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12494+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12495+ else
12496+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12497+
12498+ if (test_thread_flag(TIF_32BIT))
12499+ addr &= 0xFFFFFFFFUL;
12500+
12501+ regs->tpc = addr;
12502+ regs->tnpc = addr+4;
12503+ return 2;
12504+ }
12505+ } while (0);
12506+
12507+ do { /* PaX: patched PLT emulation #4 */
12508+ unsigned int sethi, mov1, call, mov2;
12509+
12510+ err = get_user(sethi, (unsigned int *)regs->tpc);
12511+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12512+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12513+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12514+
12515+ if (err)
12516+ break;
12517+
12518+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12519+ mov1 == 0x8210000FU &&
12520+ (call & 0xC0000000U) == 0x40000000U &&
12521+ mov2 == 0x9E100001U)
12522+ {
12523+ unsigned long addr;
12524+
12525+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12526+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12527+
12528+ if (test_thread_flag(TIF_32BIT))
12529+ addr &= 0xFFFFFFFFUL;
12530+
12531+ regs->tpc = addr;
12532+ regs->tnpc = addr+4;
12533+ return 2;
12534+ }
12535+ } while (0);
12536+
12537+ do { /* PaX: patched PLT emulation #5 */
12538+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12539+
12540+ err = get_user(sethi, (unsigned int *)regs->tpc);
12541+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12542+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12543+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12544+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12545+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12546+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12547+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12548+
12549+ if (err)
12550+ break;
12551+
12552+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12553+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12554+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12555+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12556+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12557+ sllx == 0x83287020U &&
12558+ jmpl == 0x81C04005U &&
12559+ nop == 0x01000000U)
12560+ {
12561+ unsigned long addr;
12562+
12563+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12564+ regs->u_regs[UREG_G1] <<= 32;
12565+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12566+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12567+ regs->tpc = addr;
12568+ regs->tnpc = addr+4;
12569+ return 2;
12570+ }
12571+ } while (0);
12572+
12573+ do { /* PaX: patched PLT emulation #6 */
12574+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12575+
12576+ err = get_user(sethi, (unsigned int *)regs->tpc);
12577+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12578+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12579+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12580+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12581+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12582+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12583+
12584+ if (err)
12585+ break;
12586+
12587+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12588+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12589+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12590+ sllx == 0x83287020U &&
12591+ (or & 0xFFFFE000U) == 0x8A116000U &&
12592+ jmpl == 0x81C04005U &&
12593+ nop == 0x01000000U)
12594+ {
12595+ unsigned long addr;
12596+
12597+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12598+ regs->u_regs[UREG_G1] <<= 32;
12599+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12600+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12601+ regs->tpc = addr;
12602+ regs->tnpc = addr+4;
12603+ return 2;
12604+ }
12605+ } while (0);
12606+
12607+ do { /* PaX: unpatched PLT emulation step 1 */
12608+ unsigned int sethi, ba, nop;
12609+
12610+ err = get_user(sethi, (unsigned int *)regs->tpc);
12611+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12612+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12613+
12614+ if (err)
12615+ break;
12616+
12617+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12618+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12619+ nop == 0x01000000U)
12620+ {
12621+ unsigned long addr;
12622+ unsigned int save, call;
12623+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12624+
12625+ if ((ba & 0xFFC00000U) == 0x30800000U)
12626+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12627+ else
12628+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12629+
12630+ if (test_thread_flag(TIF_32BIT))
12631+ addr &= 0xFFFFFFFFUL;
12632+
12633+ err = get_user(save, (unsigned int *)addr);
12634+ err |= get_user(call, (unsigned int *)(addr+4));
12635+ err |= get_user(nop, (unsigned int *)(addr+8));
12636+ if (err)
12637+ break;
12638+
12639+#ifdef CONFIG_PAX_DLRESOLVE
12640+ if (save == 0x9DE3BFA8U &&
12641+ (call & 0xC0000000U) == 0x40000000U &&
12642+ nop == 0x01000000U)
12643+ {
12644+ struct vm_area_struct *vma;
12645+ unsigned long call_dl_resolve;
12646+
12647+ down_read(&current->mm->mmap_sem);
12648+ call_dl_resolve = current->mm->call_dl_resolve;
12649+ up_read(&current->mm->mmap_sem);
12650+ if (likely(call_dl_resolve))
12651+ goto emulate;
12652+
12653+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12654+
12655+ down_write(&current->mm->mmap_sem);
12656+ if (current->mm->call_dl_resolve) {
12657+ call_dl_resolve = current->mm->call_dl_resolve;
12658+ up_write(&current->mm->mmap_sem);
12659+ if (vma)
12660+ kmem_cache_free(vm_area_cachep, vma);
12661+ goto emulate;
12662+ }
12663+
12664+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12665+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12666+ up_write(&current->mm->mmap_sem);
12667+ if (vma)
12668+ kmem_cache_free(vm_area_cachep, vma);
12669+ return 1;
12670+ }
12671+
12672+ if (pax_insert_vma(vma, call_dl_resolve)) {
12673+ up_write(&current->mm->mmap_sem);
12674+ kmem_cache_free(vm_area_cachep, vma);
12675+ return 1;
12676+ }
12677+
12678+ current->mm->call_dl_resolve = call_dl_resolve;
12679+ up_write(&current->mm->mmap_sem);
12680+
12681+emulate:
12682+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12683+ regs->tpc = call_dl_resolve;
12684+ regs->tnpc = addr+4;
12685+ return 3;
12686+ }
12687+#endif
12688+
12689+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12690+ if ((save & 0xFFC00000U) == 0x05000000U &&
12691+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12692+ nop == 0x01000000U)
12693+ {
12694+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12695+ regs->u_regs[UREG_G2] = addr + 4;
12696+ addr = (save & 0x003FFFFFU) << 10;
12697+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12698+
12699+ if (test_thread_flag(TIF_32BIT))
12700+ addr &= 0xFFFFFFFFUL;
12701+
12702+ regs->tpc = addr;
12703+ regs->tnpc = addr+4;
12704+ return 3;
12705+ }
12706+
12707+ /* PaX: 64-bit PLT stub */
12708+ err = get_user(sethi1, (unsigned int *)addr);
12709+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12710+ err |= get_user(or1, (unsigned int *)(addr+8));
12711+ err |= get_user(or2, (unsigned int *)(addr+12));
12712+ err |= get_user(sllx, (unsigned int *)(addr+16));
12713+ err |= get_user(add, (unsigned int *)(addr+20));
12714+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12715+ err |= get_user(nop, (unsigned int *)(addr+28));
12716+ if (err)
12717+ break;
12718+
12719+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12720+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12721+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12722+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12723+ sllx == 0x89293020U &&
12724+ add == 0x8A010005U &&
12725+ jmpl == 0x89C14000U &&
12726+ nop == 0x01000000U)
12727+ {
12728+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12729+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12730+ regs->u_regs[UREG_G4] <<= 32;
12731+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12732+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12733+ regs->u_regs[UREG_G4] = addr + 24;
12734+ addr = regs->u_regs[UREG_G5];
12735+ regs->tpc = addr;
12736+ regs->tnpc = addr+4;
12737+ return 3;
12738+ }
12739+ }
12740+ } while (0);
12741+
12742+#ifdef CONFIG_PAX_DLRESOLVE
12743+ do { /* PaX: unpatched PLT emulation step 2 */
12744+ unsigned int save, call, nop;
12745+
12746+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12747+ err |= get_user(call, (unsigned int *)regs->tpc);
12748+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12749+ if (err)
12750+ break;
12751+
12752+ if (save == 0x9DE3BFA8U &&
12753+ (call & 0xC0000000U) == 0x40000000U &&
12754+ nop == 0x01000000U)
12755+ {
12756+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12757+
12758+ if (test_thread_flag(TIF_32BIT))
12759+ dl_resolve &= 0xFFFFFFFFUL;
12760+
12761+ regs->u_regs[UREG_RETPC] = regs->tpc;
12762+ regs->tpc = dl_resolve;
12763+ regs->tnpc = dl_resolve+4;
12764+ return 3;
12765+ }
12766+ } while (0);
12767+#endif
12768+
12769+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12770+ unsigned int sethi, ba, nop;
12771+
12772+ err = get_user(sethi, (unsigned int *)regs->tpc);
12773+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12774+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12775+
12776+ if (err)
12777+ break;
12778+
12779+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12780+ (ba & 0xFFF00000U) == 0x30600000U &&
12781+ nop == 0x01000000U)
12782+ {
12783+ unsigned long addr;
12784+
12785+ addr = (sethi & 0x003FFFFFU) << 10;
12786+ regs->u_regs[UREG_G1] = addr;
12787+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12788+
12789+ if (test_thread_flag(TIF_32BIT))
12790+ addr &= 0xFFFFFFFFUL;
12791+
12792+ regs->tpc = addr;
12793+ regs->tnpc = addr+4;
12794+ return 2;
12795+ }
12796+ } while (0);
12797+
12798+#endif
12799+
12800+ return 1;
12801+}
12802+
12803+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12804+{
12805+ unsigned long i;
12806+
12807+ printk(KERN_ERR "PAX: bytes at PC: ");
12808+ for (i = 0; i < 8; i++) {
12809+ unsigned int c;
12810+ if (get_user(c, (unsigned int *)pc+i))
12811+ printk(KERN_CONT "???????? ");
12812+ else
12813+ printk(KERN_CONT "%08x ", c);
12814+ }
12815+ printk("\n");
12816+}
12817+#endif
12818+
12819 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12820 {
12821 enum ctx_state prev_state = exception_enter();
12822@@ -353,6 +816,29 @@ retry:
12823 if (!vma)
12824 goto bad_area;
12825
12826+#ifdef CONFIG_PAX_PAGEEXEC
12827+ /* PaX: detect ITLB misses on non-exec pages */
12828+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12829+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12830+ {
12831+ if (address != regs->tpc)
12832+ goto good_area;
12833+
12834+ up_read(&mm->mmap_sem);
12835+ switch (pax_handle_fetch_fault(regs)) {
12836+
12837+#ifdef CONFIG_PAX_EMUPLT
12838+ case 2:
12839+ case 3:
12840+ return;
12841+#endif
12842+
12843+ }
12844+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12845+ do_group_exit(SIGKILL);
12846+ }
12847+#endif
12848+
12849 /* Pure DTLB misses do not tell us whether the fault causing
12850 * load/store/atomic was a write or not, it only says that there
12851 * was no match. So in such a case we (carefully) read the
12852diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12853index d329537..2c3746a 100644
12854--- a/arch/sparc/mm/hugetlbpage.c
12855+++ b/arch/sparc/mm/hugetlbpage.c
12856@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12857 unsigned long addr,
12858 unsigned long len,
12859 unsigned long pgoff,
12860- unsigned long flags)
12861+ unsigned long flags,
12862+ unsigned long offset)
12863 {
12864+ struct mm_struct *mm = current->mm;
12865 unsigned long task_size = TASK_SIZE;
12866 struct vm_unmapped_area_info info;
12867
12868@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12869
12870 info.flags = 0;
12871 info.length = len;
12872- info.low_limit = TASK_UNMAPPED_BASE;
12873+ info.low_limit = mm->mmap_base;
12874 info.high_limit = min(task_size, VA_EXCLUDE_START);
12875 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12876 info.align_offset = 0;
12877+ info.threadstack_offset = offset;
12878 addr = vm_unmapped_area(&info);
12879
12880 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12881 VM_BUG_ON(addr != -ENOMEM);
12882 info.low_limit = VA_EXCLUDE_END;
12883+
12884+#ifdef CONFIG_PAX_RANDMMAP
12885+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12886+ info.low_limit += mm->delta_mmap;
12887+#endif
12888+
12889 info.high_limit = task_size;
12890 addr = vm_unmapped_area(&info);
12891 }
12892@@ -55,7 +64,8 @@ static unsigned long
12893 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12894 const unsigned long len,
12895 const unsigned long pgoff,
12896- const unsigned long flags)
12897+ const unsigned long flags,
12898+ const unsigned long offset)
12899 {
12900 struct mm_struct *mm = current->mm;
12901 unsigned long addr = addr0;
12902@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12903 info.high_limit = mm->mmap_base;
12904 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12905 info.align_offset = 0;
12906+ info.threadstack_offset = offset;
12907 addr = vm_unmapped_area(&info);
12908
12909 /*
12910@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12911 VM_BUG_ON(addr != -ENOMEM);
12912 info.flags = 0;
12913 info.low_limit = TASK_UNMAPPED_BASE;
12914+
12915+#ifdef CONFIG_PAX_RANDMMAP
12916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12917+ info.low_limit += mm->delta_mmap;
12918+#endif
12919+
12920 info.high_limit = STACK_TOP32;
12921 addr = vm_unmapped_area(&info);
12922 }
12923@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12924 struct mm_struct *mm = current->mm;
12925 struct vm_area_struct *vma;
12926 unsigned long task_size = TASK_SIZE;
12927+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12928
12929 if (test_thread_flag(TIF_32BIT))
12930 task_size = STACK_TOP32;
12931@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12932 return addr;
12933 }
12934
12935+#ifdef CONFIG_PAX_RANDMMAP
12936+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12937+#endif
12938+
12939 if (addr) {
12940 addr = ALIGN(addr, HPAGE_SIZE);
12941 vma = find_vma(mm, addr);
12942- if (task_size - len >= addr &&
12943- (!vma || addr + len <= vma->vm_start))
12944+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12945 return addr;
12946 }
12947 if (mm->get_unmapped_area == arch_get_unmapped_area)
12948 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12949- pgoff, flags);
12950+ pgoff, flags, offset);
12951 else
12952 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12953- pgoff, flags);
12954+ pgoff, flags, offset);
12955 }
12956
12957 pte_t *huge_pte_alloc(struct mm_struct *mm,
12958diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12959index 04bc826..0fefab9 100644
12960--- a/arch/sparc/mm/init_64.c
12961+++ b/arch/sparc/mm/init_64.c
12962@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12963 int num_kernel_image_mappings;
12964
12965 #ifdef CONFIG_DEBUG_DCFLUSH
12966-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12967+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12968 #ifdef CONFIG_SMP
12969-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12970+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12971 #endif
12972 #endif
12973
12974@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12975 {
12976 BUG_ON(tlb_type == hypervisor);
12977 #ifdef CONFIG_DEBUG_DCFLUSH
12978- atomic_inc(&dcpage_flushes);
12979+ atomic_inc_unchecked(&dcpage_flushes);
12980 #endif
12981
12982 #ifdef DCACHE_ALIASING_POSSIBLE
12983@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12984
12985 #ifdef CONFIG_DEBUG_DCFLUSH
12986 seq_printf(m, "DCPageFlushes\t: %d\n",
12987- atomic_read(&dcpage_flushes));
12988+ atomic_read_unchecked(&dcpage_flushes));
12989 #ifdef CONFIG_SMP
12990 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12991- atomic_read(&dcpage_flushes_xcall));
12992+ atomic_read_unchecked(&dcpage_flushes_xcall));
12993 #endif /* CONFIG_SMP */
12994 #endif /* CONFIG_DEBUG_DCFLUSH */
12995 }
12996diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
12997index ece4af0..f04b862 100644
12998--- a/arch/sparc/net/bpf_jit_comp.c
12999+++ b/arch/sparc/net/bpf_jit_comp.c
13000@@ -823,5 +823,6 @@ void bpf_jit_free(struct bpf_prog *fp)
13001 {
13002 if (fp->jited)
13003 module_free(NULL, fp->bpf_func);
13004- kfree(fp);
13005+
13006+ bpf_prog_unlock_free(fp);
13007 }
13008diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
13009index 7fcd492..1311074 100644
13010--- a/arch/tile/Kconfig
13011+++ b/arch/tile/Kconfig
13012@@ -191,6 +191,7 @@ source "kernel/Kconfig.hz"
13013
13014 config KEXEC
13015 bool "kexec system call"
13016+ depends on !GRKERNSEC_KMEM
13017 ---help---
13018 kexec is a system call that implements the ability to shutdown your
13019 current kernel, and to start another kernel. It is like a reboot
13020diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
13021index 7b11c5f..755a026 100644
13022--- a/arch/tile/include/asm/atomic_64.h
13023+++ b/arch/tile/include/asm/atomic_64.h
13024@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
13025
13026 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
13027
13028+#define atomic64_read_unchecked(v) atomic64_read(v)
13029+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
13030+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
13031+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
13032+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
13033+#define atomic64_inc_unchecked(v) atomic64_inc(v)
13034+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
13035+#define atomic64_dec_unchecked(v) atomic64_dec(v)
13036+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
13037+
13038 /* Define this to indicate that cmpxchg is an efficient operation. */
13039 #define __HAVE_ARCH_CMPXCHG
13040
13041diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
13042index 6160761..00cac88 100644
13043--- a/arch/tile/include/asm/cache.h
13044+++ b/arch/tile/include/asm/cache.h
13045@@ -15,11 +15,12 @@
13046 #ifndef _ASM_TILE_CACHE_H
13047 #define _ASM_TILE_CACHE_H
13048
13049+#include <linux/const.h>
13050 #include <arch/chip.h>
13051
13052 /* bytes per L1 data cache line */
13053 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
13054-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13055+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13056
13057 /* bytes per L2 cache line */
13058 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
13059diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
13060index b6cde32..c0cb736 100644
13061--- a/arch/tile/include/asm/uaccess.h
13062+++ b/arch/tile/include/asm/uaccess.h
13063@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
13064 const void __user *from,
13065 unsigned long n)
13066 {
13067- int sz = __compiletime_object_size(to);
13068+ size_t sz = __compiletime_object_size(to);
13069
13070- if (likely(sz == -1 || sz >= n))
13071+ if (likely(sz == (size_t)-1 || sz >= n))
13072 n = _copy_from_user(to, from, n);
13073 else
13074 copy_from_user_overflow();
13075diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
13076index e514899..f8743c4 100644
13077--- a/arch/tile/mm/hugetlbpage.c
13078+++ b/arch/tile/mm/hugetlbpage.c
13079@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13080 info.high_limit = TASK_SIZE;
13081 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13082 info.align_offset = 0;
13083+ info.threadstack_offset = 0;
13084 return vm_unmapped_area(&info);
13085 }
13086
13087@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13088 info.high_limit = current->mm->mmap_base;
13089 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13090 info.align_offset = 0;
13091+ info.threadstack_offset = 0;
13092 addr = vm_unmapped_area(&info);
13093
13094 /*
13095diff --git a/arch/um/Makefile b/arch/um/Makefile
13096index e4b1a96..16162f8 100644
13097--- a/arch/um/Makefile
13098+++ b/arch/um/Makefile
13099@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
13100 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
13101 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
13102
13103+ifdef CONSTIFY_PLUGIN
13104+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13105+endif
13106+
13107 #This will adjust *FLAGS accordingly to the platform.
13108 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
13109
13110diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
13111index 19e1bdd..3665b77 100644
13112--- a/arch/um/include/asm/cache.h
13113+++ b/arch/um/include/asm/cache.h
13114@@ -1,6 +1,7 @@
13115 #ifndef __UM_CACHE_H
13116 #define __UM_CACHE_H
13117
13118+#include <linux/const.h>
13119
13120 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
13121 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13122@@ -12,6 +13,6 @@
13123 # define L1_CACHE_SHIFT 5
13124 #endif
13125
13126-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13127+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13128
13129 #endif
13130diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
13131index 2e0a6b1..a64d0f5 100644
13132--- a/arch/um/include/asm/kmap_types.h
13133+++ b/arch/um/include/asm/kmap_types.h
13134@@ -8,6 +8,6 @@
13135
13136 /* No more #include "asm/arch/kmap_types.h" ! */
13137
13138-#define KM_TYPE_NR 14
13139+#define KM_TYPE_NR 15
13140
13141 #endif
13142diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
13143index 71c5d13..4c7b9f1 100644
13144--- a/arch/um/include/asm/page.h
13145+++ b/arch/um/include/asm/page.h
13146@@ -14,6 +14,9 @@
13147 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
13148 #define PAGE_MASK (~(PAGE_SIZE-1))
13149
13150+#define ktla_ktva(addr) (addr)
13151+#define ktva_ktla(addr) (addr)
13152+
13153 #ifndef __ASSEMBLY__
13154
13155 struct page;
13156diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
13157index 0032f92..cd151e0 100644
13158--- a/arch/um/include/asm/pgtable-3level.h
13159+++ b/arch/um/include/asm/pgtable-3level.h
13160@@ -58,6 +58,7 @@
13161 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
13162 #define pud_populate(mm, pud, pmd) \
13163 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
13164+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
13165
13166 #ifdef CONFIG_64BIT
13167 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
13168diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
13169index f17bca8..48adb87 100644
13170--- a/arch/um/kernel/process.c
13171+++ b/arch/um/kernel/process.c
13172@@ -356,22 +356,6 @@ int singlestepping(void * t)
13173 return 2;
13174 }
13175
13176-/*
13177- * Only x86 and x86_64 have an arch_align_stack().
13178- * All other arches have "#define arch_align_stack(x) (x)"
13179- * in their asm/exec.h
13180- * As this is included in UML from asm-um/system-generic.h,
13181- * we can use it to behave as the subarch does.
13182- */
13183-#ifndef arch_align_stack
13184-unsigned long arch_align_stack(unsigned long sp)
13185-{
13186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
13187- sp -= get_random_int() % 8192;
13188- return sp & ~0xf;
13189-}
13190-#endif
13191-
13192 unsigned long get_wchan(struct task_struct *p)
13193 {
13194 unsigned long stack_page, sp, ip;
13195diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
13196index ad8f795..2c7eec6 100644
13197--- a/arch/unicore32/include/asm/cache.h
13198+++ b/arch/unicore32/include/asm/cache.h
13199@@ -12,8 +12,10 @@
13200 #ifndef __UNICORE_CACHE_H__
13201 #define __UNICORE_CACHE_H__
13202
13203-#define L1_CACHE_SHIFT (5)
13204-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13205+#include <linux/const.h>
13206+
13207+#define L1_CACHE_SHIFT 5
13208+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13209
13210 /*
13211 * Memory returned by kmalloc() may be used for DMA, so we must make
13212diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
13213index 3632743..630a8bb 100644
13214--- a/arch/x86/Kconfig
13215+++ b/arch/x86/Kconfig
13216@@ -130,7 +130,7 @@ config X86
13217 select RTC_LIB
13218 select HAVE_DEBUG_STACKOVERFLOW
13219 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
13220- select HAVE_CC_STACKPROTECTOR
13221+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
13222 select GENERIC_CPU_AUTOPROBE
13223 select HAVE_ARCH_AUDITSYSCALL
13224 select ARCH_SUPPORTS_ATOMIC_RMW
13225@@ -258,7 +258,7 @@ config X86_HT
13226
13227 config X86_32_LAZY_GS
13228 def_bool y
13229- depends on X86_32 && !CC_STACKPROTECTOR
13230+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
13231
13232 config ARCH_HWEIGHT_CFLAGS
13233 string
13234@@ -555,6 +555,7 @@ config SCHED_OMIT_FRAME_POINTER
13235
13236 menuconfig HYPERVISOR_GUEST
13237 bool "Linux guest support"
13238+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
13239 ---help---
13240 Say Y here to enable options for running Linux under various hyper-
13241 visors. This option enables basic hypervisor detection and platform
13242@@ -1083,6 +1084,7 @@ choice
13243
13244 config NOHIGHMEM
13245 bool "off"
13246+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13247 ---help---
13248 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
13249 However, the address space of 32-bit x86 processors is only 4
13250@@ -1119,6 +1121,7 @@ config NOHIGHMEM
13251
13252 config HIGHMEM4G
13253 bool "4GB"
13254+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13255 ---help---
13256 Select this if you have a 32-bit processor and between 1 and 4
13257 gigabytes of physical RAM.
13258@@ -1171,7 +1174,7 @@ config PAGE_OFFSET
13259 hex
13260 default 0xB0000000 if VMSPLIT_3G_OPT
13261 default 0x80000000 if VMSPLIT_2G
13262- default 0x78000000 if VMSPLIT_2G_OPT
13263+ default 0x70000000 if VMSPLIT_2G_OPT
13264 default 0x40000000 if VMSPLIT_1G
13265 default 0xC0000000
13266 depends on X86_32
13267@@ -1586,6 +1589,7 @@ source kernel/Kconfig.hz
13268
13269 config KEXEC
13270 bool "kexec system call"
13271+ depends on !GRKERNSEC_KMEM
13272 ---help---
13273 kexec is a system call that implements the ability to shutdown your
13274 current kernel, and to start another kernel. It is like a reboot
13275@@ -1771,7 +1775,9 @@ config X86_NEED_RELOCS
13276
13277 config PHYSICAL_ALIGN
13278 hex "Alignment value to which kernel should be aligned"
13279- default "0x200000"
13280+ default "0x1000000"
13281+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
13282+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
13283 range 0x2000 0x1000000 if X86_32
13284 range 0x200000 0x1000000 if X86_64
13285 ---help---
13286@@ -1854,6 +1860,7 @@ config COMPAT_VDSO
13287 def_bool n
13288 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
13289 depends on X86_32 || IA32_EMULATION
13290+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
13291 ---help---
13292 Certain buggy versions of glibc will crash if they are
13293 presented with a 32-bit vDSO that is not mapped at the address
13294diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
13295index 6983314..54ad7e8 100644
13296--- a/arch/x86/Kconfig.cpu
13297+++ b/arch/x86/Kconfig.cpu
13298@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
13299
13300 config X86_F00F_BUG
13301 def_bool y
13302- depends on M586MMX || M586TSC || M586 || M486
13303+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
13304
13305 config X86_INVD_BUG
13306 def_bool y
13307@@ -327,7 +327,7 @@ config X86_INVD_BUG
13308
13309 config X86_ALIGNMENT_16
13310 def_bool y
13311- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13312+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13313
13314 config X86_INTEL_USERCOPY
13315 def_bool y
13316@@ -369,7 +369,7 @@ config X86_CMPXCHG64
13317 # generates cmov.
13318 config X86_CMOV
13319 def_bool y
13320- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13321+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13322
13323 config X86_MINIMUM_CPU_FAMILY
13324 int
13325diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
13326index 61bd2ad..50b625d 100644
13327--- a/arch/x86/Kconfig.debug
13328+++ b/arch/x86/Kconfig.debug
13329@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
13330 config DEBUG_RODATA
13331 bool "Write protect kernel read-only data structures"
13332 default y
13333- depends on DEBUG_KERNEL
13334+ depends on DEBUG_KERNEL && BROKEN
13335 ---help---
13336 Mark the kernel read-only data as write-protected in the pagetables,
13337 in order to catch accidental (and incorrect) writes to such const
13338@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
13339
13340 config DEBUG_SET_MODULE_RONX
13341 bool "Set loadable kernel module data as NX and text as RO"
13342- depends on MODULES
13343+ depends on MODULES && BROKEN
13344 ---help---
13345 This option helps catch unintended modifications to loadable
13346 kernel module's text and read-only data. It also prevents execution
13347diff --git a/arch/x86/Makefile b/arch/x86/Makefile
13348index 60087ca..9d9500e 100644
13349--- a/arch/x86/Makefile
13350+++ b/arch/x86/Makefile
13351@@ -68,9 +68,6 @@ ifeq ($(CONFIG_X86_32),y)
13352 # CPU-specific tuning. Anything which can be shared with UML should go here.
13353 include $(srctree)/arch/x86/Makefile_32.cpu
13354 KBUILD_CFLAGS += $(cflags-y)
13355-
13356- # temporary until string.h is fixed
13357- KBUILD_CFLAGS += -ffreestanding
13358 else
13359 BITS := 64
13360 UTS_MACHINE := x86_64
13361@@ -111,6 +108,9 @@ else
13362 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
13363 endif
13364
13365+# temporary until string.h is fixed
13366+KBUILD_CFLAGS += -ffreestanding
13367+
13368 # Make sure compiler does not have buggy stack-protector support.
13369 ifdef CONFIG_CC_STACKPROTECTOR
13370 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
13371@@ -184,6 +184,7 @@ archheaders:
13372 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
13373
13374 archprepare:
13375+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
13376 ifeq ($(CONFIG_KEXEC_FILE),y)
13377 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
13378 endif
13379@@ -274,3 +275,9 @@ define archhelp
13380 echo ' FDINITRD=file initrd for the booted kernel'
13381 echo ' kvmconfig - Enable additional options for guest kernel support'
13382 endef
13383+
13384+define OLD_LD
13385+
13386+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
13387+*** Please upgrade your binutils to 2.18 or newer
13388+endef
13389diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
13390index dbe8dd2..2f0a98f 100644
13391--- a/arch/x86/boot/Makefile
13392+++ b/arch/x86/boot/Makefile
13393@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
13394 # ---------------------------------------------------------------------------
13395
13396 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
13397+ifdef CONSTIFY_PLUGIN
13398+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13399+endif
13400 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13401 GCOV_PROFILE := n
13402
13403diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
13404index 878e4b9..20537ab 100644
13405--- a/arch/x86/boot/bitops.h
13406+++ b/arch/x86/boot/bitops.h
13407@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13408 u8 v;
13409 const u32 *p = (const u32 *)addr;
13410
13411- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13412+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13413 return v;
13414 }
13415
13416@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13417
13418 static inline void set_bit(int nr, void *addr)
13419 {
13420- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13421+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13422 }
13423
13424 #endif /* BOOT_BITOPS_H */
13425diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
13426index bd49ec6..94c7f58 100644
13427--- a/arch/x86/boot/boot.h
13428+++ b/arch/x86/boot/boot.h
13429@@ -84,7 +84,7 @@ static inline void io_delay(void)
13430 static inline u16 ds(void)
13431 {
13432 u16 seg;
13433- asm("movw %%ds,%0" : "=rm" (seg));
13434+ asm volatile("movw %%ds,%0" : "=rm" (seg));
13435 return seg;
13436 }
13437
13438diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
13439index 0fcd913..3bb5c42 100644
13440--- a/arch/x86/boot/compressed/Makefile
13441+++ b/arch/x86/boot/compressed/Makefile
13442@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
13443 KBUILD_CFLAGS += -mno-mmx -mno-sse
13444 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
13445 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
13446+ifdef CONSTIFY_PLUGIN
13447+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13448+endif
13449
13450 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13451 GCOV_PROFILE := n
13452diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13453index a53440e..c3dbf1e 100644
13454--- a/arch/x86/boot/compressed/efi_stub_32.S
13455+++ b/arch/x86/boot/compressed/efi_stub_32.S
13456@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13457 * parameter 2, ..., param n. To make things easy, we save the return
13458 * address of efi_call_phys in a global variable.
13459 */
13460- popl %ecx
13461- movl %ecx, saved_return_addr(%edx)
13462- /* get the function pointer into ECX*/
13463- popl %ecx
13464- movl %ecx, efi_rt_function_ptr(%edx)
13465+ popl saved_return_addr(%edx)
13466+ popl efi_rt_function_ptr(%edx)
13467
13468 /*
13469 * 3. Call the physical function.
13470 */
13471- call *%ecx
13472+ call *efi_rt_function_ptr(%edx)
13473
13474 /*
13475 * 4. Balance the stack. And because EAX contain the return value,
13476@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13477 1: popl %edx
13478 subl $1b, %edx
13479
13480- movl efi_rt_function_ptr(%edx), %ecx
13481- pushl %ecx
13482+ pushl efi_rt_function_ptr(%edx)
13483
13484 /*
13485 * 10. Push the saved return address onto the stack and return.
13486 */
13487- movl saved_return_addr(%edx), %ecx
13488- pushl %ecx
13489- ret
13490+ jmpl *saved_return_addr(%edx)
13491 ENDPROC(efi_call_phys)
13492 .previous
13493
13494diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13495index cbed140..5f2ca57 100644
13496--- a/arch/x86/boot/compressed/head_32.S
13497+++ b/arch/x86/boot/compressed/head_32.S
13498@@ -140,10 +140,10 @@ preferred_addr:
13499 addl %eax, %ebx
13500 notl %eax
13501 andl %eax, %ebx
13502- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13503+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13504 jge 1f
13505 #endif
13506- movl $LOAD_PHYSICAL_ADDR, %ebx
13507+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13508 1:
13509
13510 /* Target address to relocate to for decompression */
13511diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13512index 2884e0c..904a2f7 100644
13513--- a/arch/x86/boot/compressed/head_64.S
13514+++ b/arch/x86/boot/compressed/head_64.S
13515@@ -94,10 +94,10 @@ ENTRY(startup_32)
13516 addl %eax, %ebx
13517 notl %eax
13518 andl %eax, %ebx
13519- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13520+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13521 jge 1f
13522 #endif
13523- movl $LOAD_PHYSICAL_ADDR, %ebx
13524+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13525 1:
13526
13527 /* Target address to relocate to for decompression */
13528@@ -322,10 +322,10 @@ preferred_addr:
13529 addq %rax, %rbp
13530 notq %rax
13531 andq %rax, %rbp
13532- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13533+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13534 jge 1f
13535 #endif
13536- movq $LOAD_PHYSICAL_ADDR, %rbp
13537+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13538 1:
13539
13540 /* Target address to relocate to for decompression */
13541@@ -431,8 +431,8 @@ gdt:
13542 .long gdt
13543 .word 0
13544 .quad 0x0000000000000000 /* NULL descriptor */
13545- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13546- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13547+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13548+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13549 .quad 0x0080890000000000 /* TS descriptor */
13550 .quad 0x0000000000000000 /* TS continued */
13551 gdt_end:
13552diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13553index 57ab74d..7c52182 100644
13554--- a/arch/x86/boot/compressed/misc.c
13555+++ b/arch/x86/boot/compressed/misc.c
13556@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13557 * Calculate the delta between where vmlinux was linked to load
13558 * and where it was actually loaded.
13559 */
13560- delta = min_addr - LOAD_PHYSICAL_ADDR;
13561+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13562 if (!delta) {
13563 debug_putstr("No relocation needed... ");
13564 return;
13565@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13566 Elf32_Ehdr ehdr;
13567 Elf32_Phdr *phdrs, *phdr;
13568 #endif
13569- void *dest;
13570+ void *dest, *prev;
13571 int i;
13572
13573 memcpy(&ehdr, output, sizeof(ehdr));
13574@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13575 case PT_LOAD:
13576 #ifdef CONFIG_RELOCATABLE
13577 dest = output;
13578- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13579+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13580 #else
13581 dest = (void *)(phdr->p_paddr);
13582 #endif
13583 memcpy(dest,
13584 output + phdr->p_offset,
13585 phdr->p_filesz);
13586+ if (i)
13587+ memset(prev, 0xff, dest - prev);
13588+ prev = dest + phdr->p_filesz;
13589 break;
13590 default: /* Ignore other PT_* */ break;
13591 }
13592@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13593 error("Destination address too large");
13594 #endif
13595 #ifndef CONFIG_RELOCATABLE
13596- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13597+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13598 error("Wrong destination address");
13599 #endif
13600
13601diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13602index 1fd7d57..0f7d096 100644
13603--- a/arch/x86/boot/cpucheck.c
13604+++ b/arch/x86/boot/cpucheck.c
13605@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13606 u32 ecx = MSR_K7_HWCR;
13607 u32 eax, edx;
13608
13609- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13610+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13611 eax &= ~(1 << 15);
13612- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13613+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13614
13615 get_cpuflags(); /* Make sure it really did something */
13616 err = check_cpuflags();
13617@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13618 u32 ecx = MSR_VIA_FCR;
13619 u32 eax, edx;
13620
13621- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13622+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13623 eax |= (1<<1)|(1<<7);
13624- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13625+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13626
13627 set_bit(X86_FEATURE_CX8, cpu.flags);
13628 err = check_cpuflags();
13629@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13630 u32 eax, edx;
13631 u32 level = 1;
13632
13633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13634- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13635- asm("cpuid"
13636+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13637+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13638+ asm volatile("cpuid"
13639 : "+a" (level), "=d" (cpu.flags[0])
13640 : : "ecx", "ebx");
13641- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13642+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13643
13644 err = check_cpuflags();
13645 } else if (err == 0x01 &&
13646diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13647index 16ef025..91e033b 100644
13648--- a/arch/x86/boot/header.S
13649+++ b/arch/x86/boot/header.S
13650@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13651 # single linked list of
13652 # struct setup_data
13653
13654-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13655+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13656
13657 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13659+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13660+#else
13661 #define VO_INIT_SIZE (VO__end - VO__text)
13662+#endif
13663 #if ZO_INIT_SIZE > VO_INIT_SIZE
13664 #define INIT_SIZE ZO_INIT_SIZE
13665 #else
13666diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13667index db75d07..8e6d0af 100644
13668--- a/arch/x86/boot/memory.c
13669+++ b/arch/x86/boot/memory.c
13670@@ -19,7 +19,7 @@
13671
13672 static int detect_memory_e820(void)
13673 {
13674- int count = 0;
13675+ unsigned int count = 0;
13676 struct biosregs ireg, oreg;
13677 struct e820entry *desc = boot_params.e820_map;
13678 static struct e820entry buf; /* static so it is zeroed */
13679diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13680index ba3e100..6501b8f 100644
13681--- a/arch/x86/boot/video-vesa.c
13682+++ b/arch/x86/boot/video-vesa.c
13683@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13684
13685 boot_params.screen_info.vesapm_seg = oreg.es;
13686 boot_params.screen_info.vesapm_off = oreg.di;
13687+ boot_params.screen_info.vesapm_size = oreg.cx;
13688 }
13689
13690 /*
13691diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13692index 43eda28..5ab5fdb 100644
13693--- a/arch/x86/boot/video.c
13694+++ b/arch/x86/boot/video.c
13695@@ -96,7 +96,7 @@ static void store_mode_params(void)
13696 static unsigned int get_entry(void)
13697 {
13698 char entry_buf[4];
13699- int i, len = 0;
13700+ unsigned int i, len = 0;
13701 int key;
13702 unsigned int v;
13703
13704diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13705index 9105655..41779c1 100644
13706--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13707+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13708@@ -8,6 +8,8 @@
13709 * including this sentence is retained in full.
13710 */
13711
13712+#include <asm/alternative-asm.h>
13713+
13714 .extern crypto_ft_tab
13715 .extern crypto_it_tab
13716 .extern crypto_fl_tab
13717@@ -70,6 +72,8 @@
13718 je B192; \
13719 leaq 32(r9),r9;
13720
13721+#define ret pax_force_retaddr; ret
13722+
13723 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13724 movq r1,r2; \
13725 movq r3,r4; \
13726diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13727index 477e9d7..c92c7d8 100644
13728--- a/arch/x86/crypto/aesni-intel_asm.S
13729+++ b/arch/x86/crypto/aesni-intel_asm.S
13730@@ -31,6 +31,7 @@
13731
13732 #include <linux/linkage.h>
13733 #include <asm/inst.h>
13734+#include <asm/alternative-asm.h>
13735
13736 #ifdef __x86_64__
13737 .data
13738@@ -205,7 +206,7 @@ enc: .octa 0x2
13739 * num_initial_blocks = b mod 4
13740 * encrypt the initial num_initial_blocks blocks and apply ghash on
13741 * the ciphertext
13742-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13743+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13744 * are clobbered
13745 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13746 */
13747@@ -214,8 +215,8 @@ enc: .octa 0x2
13748 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13749 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13750 mov arg7, %r10 # %r10 = AAD
13751- mov arg8, %r12 # %r12 = aadLen
13752- mov %r12, %r11
13753+ mov arg8, %r15 # %r15 = aadLen
13754+ mov %r15, %r11
13755 pxor %xmm\i, %xmm\i
13756 _get_AAD_loop\num_initial_blocks\operation:
13757 movd (%r10), \TMP1
13758@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13759 psrldq $4, %xmm\i
13760 pxor \TMP1, %xmm\i
13761 add $4, %r10
13762- sub $4, %r12
13763+ sub $4, %r15
13764 jne _get_AAD_loop\num_initial_blocks\operation
13765 cmp $16, %r11
13766 je _get_AAD_loop2_done\num_initial_blocks\operation
13767- mov $16, %r12
13768+ mov $16, %r15
13769 _get_AAD_loop2\num_initial_blocks\operation:
13770 psrldq $4, %xmm\i
13771- sub $4, %r12
13772- cmp %r11, %r12
13773+ sub $4, %r15
13774+ cmp %r11, %r15
13775 jne _get_AAD_loop2\num_initial_blocks\operation
13776 _get_AAD_loop2_done\num_initial_blocks\operation:
13777 movdqa SHUF_MASK(%rip), %xmm14
13778@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13779 * num_initial_blocks = b mod 4
13780 * encrypt the initial num_initial_blocks blocks and apply ghash on
13781 * the ciphertext
13782-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13783+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13784 * are clobbered
13785 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13786 */
13787@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13788 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13789 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13790 mov arg7, %r10 # %r10 = AAD
13791- mov arg8, %r12 # %r12 = aadLen
13792- mov %r12, %r11
13793+ mov arg8, %r15 # %r15 = aadLen
13794+ mov %r15, %r11
13795 pxor %xmm\i, %xmm\i
13796 _get_AAD_loop\num_initial_blocks\operation:
13797 movd (%r10), \TMP1
13798@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13799 psrldq $4, %xmm\i
13800 pxor \TMP1, %xmm\i
13801 add $4, %r10
13802- sub $4, %r12
13803+ sub $4, %r15
13804 jne _get_AAD_loop\num_initial_blocks\operation
13805 cmp $16, %r11
13806 je _get_AAD_loop2_done\num_initial_blocks\operation
13807- mov $16, %r12
13808+ mov $16, %r15
13809 _get_AAD_loop2\num_initial_blocks\operation:
13810 psrldq $4, %xmm\i
13811- sub $4, %r12
13812- cmp %r11, %r12
13813+ sub $4, %r15
13814+ cmp %r11, %r15
13815 jne _get_AAD_loop2\num_initial_blocks\operation
13816 _get_AAD_loop2_done\num_initial_blocks\operation:
13817 movdqa SHUF_MASK(%rip), %xmm14
13818@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13819 *
13820 *****************************************************************************/
13821 ENTRY(aesni_gcm_dec)
13822- push %r12
13823+ push %r15
13824 push %r13
13825 push %r14
13826 mov %rsp, %r14
13827@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13828 */
13829 sub $VARIABLE_OFFSET, %rsp
13830 and $~63, %rsp # align rsp to 64 bytes
13831- mov %arg6, %r12
13832- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13833+ mov %arg6, %r15
13834+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13835 movdqa SHUF_MASK(%rip), %xmm2
13836 PSHUFB_XMM %xmm2, %xmm13
13837
13838@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13839 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13840 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13841 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13842- mov %r13, %r12
13843- and $(3<<4), %r12
13844+ mov %r13, %r15
13845+ and $(3<<4), %r15
13846 jz _initial_num_blocks_is_0_decrypt
13847- cmp $(2<<4), %r12
13848+ cmp $(2<<4), %r15
13849 jb _initial_num_blocks_is_1_decrypt
13850 je _initial_num_blocks_is_2_decrypt
13851 _initial_num_blocks_is_3_decrypt:
13852@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13853 sub $16, %r11
13854 add %r13, %r11
13855 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13856- lea SHIFT_MASK+16(%rip), %r12
13857- sub %r13, %r12
13858+ lea SHIFT_MASK+16(%rip), %r15
13859+ sub %r13, %r15
13860 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13861 # (%r13 is the number of bytes in plaintext mod 16)
13862- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13863+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13864 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13865
13866 movdqa %xmm1, %xmm2
13867 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13868- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13869+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13870 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13871 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13872 pand %xmm1, %xmm2
13873@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13874 sub $1, %r13
13875 jne _less_than_8_bytes_left_decrypt
13876 _multiple_of_16_bytes_decrypt:
13877- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13878- shl $3, %r12 # convert into number of bits
13879- movd %r12d, %xmm15 # len(A) in %xmm15
13880+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13881+ shl $3, %r15 # convert into number of bits
13882+ movd %r15d, %xmm15 # len(A) in %xmm15
13883 shl $3, %arg4 # len(C) in bits (*128)
13884 MOVQ_R64_XMM %arg4, %xmm1
13885 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13886@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13887 mov %r14, %rsp
13888 pop %r14
13889 pop %r13
13890- pop %r12
13891+ pop %r15
13892+ pax_force_retaddr
13893 ret
13894 ENDPROC(aesni_gcm_dec)
13895
13896@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13897 * poly = x^128 + x^127 + x^126 + x^121 + 1
13898 ***************************************************************************/
13899 ENTRY(aesni_gcm_enc)
13900- push %r12
13901+ push %r15
13902 push %r13
13903 push %r14
13904 mov %rsp, %r14
13905@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13906 #
13907 sub $VARIABLE_OFFSET, %rsp
13908 and $~63, %rsp
13909- mov %arg6, %r12
13910- movdqu (%r12), %xmm13
13911+ mov %arg6, %r15
13912+ movdqu (%r15), %xmm13
13913 movdqa SHUF_MASK(%rip), %xmm2
13914 PSHUFB_XMM %xmm2, %xmm13
13915
13916@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13917 movdqa %xmm13, HashKey(%rsp)
13918 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13919 and $-16, %r13
13920- mov %r13, %r12
13921+ mov %r13, %r15
13922
13923 # Encrypt first few blocks
13924
13925- and $(3<<4), %r12
13926+ and $(3<<4), %r15
13927 jz _initial_num_blocks_is_0_encrypt
13928- cmp $(2<<4), %r12
13929+ cmp $(2<<4), %r15
13930 jb _initial_num_blocks_is_1_encrypt
13931 je _initial_num_blocks_is_2_encrypt
13932 _initial_num_blocks_is_3_encrypt:
13933@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13934 sub $16, %r11
13935 add %r13, %r11
13936 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13937- lea SHIFT_MASK+16(%rip), %r12
13938- sub %r13, %r12
13939+ lea SHIFT_MASK+16(%rip), %r15
13940+ sub %r13, %r15
13941 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13942 # (%r13 is the number of bytes in plaintext mod 16)
13943- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13944+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13945 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13946 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13947- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13948+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13949 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13950 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13951 movdqa SHUF_MASK(%rip), %xmm10
13952@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13953 sub $1, %r13
13954 jne _less_than_8_bytes_left_encrypt
13955 _multiple_of_16_bytes_encrypt:
13956- mov arg8, %r12 # %r12 = addLen (number of bytes)
13957- shl $3, %r12
13958- movd %r12d, %xmm15 # len(A) in %xmm15
13959+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13960+ shl $3, %r15
13961+ movd %r15d, %xmm15 # len(A) in %xmm15
13962 shl $3, %arg4 # len(C) in bits (*128)
13963 MOVQ_R64_XMM %arg4, %xmm1
13964 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13965@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13966 mov %r14, %rsp
13967 pop %r14
13968 pop %r13
13969- pop %r12
13970+ pop %r15
13971+ pax_force_retaddr
13972 ret
13973 ENDPROC(aesni_gcm_enc)
13974
13975@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13976 pxor %xmm1, %xmm0
13977 movaps %xmm0, (TKEYP)
13978 add $0x10, TKEYP
13979+ pax_force_retaddr
13980 ret
13981 ENDPROC(_key_expansion_128)
13982 ENDPROC(_key_expansion_256a)
13983@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13984 shufps $0b01001110, %xmm2, %xmm1
13985 movaps %xmm1, 0x10(TKEYP)
13986 add $0x20, TKEYP
13987+ pax_force_retaddr
13988 ret
13989 ENDPROC(_key_expansion_192a)
13990
13991@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13992
13993 movaps %xmm0, (TKEYP)
13994 add $0x10, TKEYP
13995+ pax_force_retaddr
13996 ret
13997 ENDPROC(_key_expansion_192b)
13998
13999@@ -1781,6 +1787,7 @@ _key_expansion_256b:
14000 pxor %xmm1, %xmm2
14001 movaps %xmm2, (TKEYP)
14002 add $0x10, TKEYP
14003+ pax_force_retaddr
14004 ret
14005 ENDPROC(_key_expansion_256b)
14006
14007@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
14008 #ifndef __x86_64__
14009 popl KEYP
14010 #endif
14011+ pax_force_retaddr
14012 ret
14013 ENDPROC(aesni_set_key)
14014
14015@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
14016 popl KLEN
14017 popl KEYP
14018 #endif
14019+ pax_force_retaddr
14020 ret
14021 ENDPROC(aesni_enc)
14022
14023@@ -1974,6 +1983,7 @@ _aesni_enc1:
14024 AESENC KEY STATE
14025 movaps 0x70(TKEYP), KEY
14026 AESENCLAST KEY STATE
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(_aesni_enc1)
14030
14031@@ -2083,6 +2093,7 @@ _aesni_enc4:
14032 AESENCLAST KEY STATE2
14033 AESENCLAST KEY STATE3
14034 AESENCLAST KEY STATE4
14035+ pax_force_retaddr
14036 ret
14037 ENDPROC(_aesni_enc4)
14038
14039@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
14040 popl KLEN
14041 popl KEYP
14042 #endif
14043+ pax_force_retaddr
14044 ret
14045 ENDPROC(aesni_dec)
14046
14047@@ -2164,6 +2176,7 @@ _aesni_dec1:
14048 AESDEC KEY STATE
14049 movaps 0x70(TKEYP), KEY
14050 AESDECLAST KEY STATE
14051+ pax_force_retaddr
14052 ret
14053 ENDPROC(_aesni_dec1)
14054
14055@@ -2273,6 +2286,7 @@ _aesni_dec4:
14056 AESDECLAST KEY STATE2
14057 AESDECLAST KEY STATE3
14058 AESDECLAST KEY STATE4
14059+ pax_force_retaddr
14060 ret
14061 ENDPROC(_aesni_dec4)
14062
14063@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
14064 popl KEYP
14065 popl LEN
14066 #endif
14067+ pax_force_retaddr
14068 ret
14069 ENDPROC(aesni_ecb_enc)
14070
14071@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
14072 popl KEYP
14073 popl LEN
14074 #endif
14075+ pax_force_retaddr
14076 ret
14077 ENDPROC(aesni_ecb_dec)
14078
14079@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
14080 popl LEN
14081 popl IVP
14082 #endif
14083+ pax_force_retaddr
14084 ret
14085 ENDPROC(aesni_cbc_enc)
14086
14087@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
14088 popl LEN
14089 popl IVP
14090 #endif
14091+ pax_force_retaddr
14092 ret
14093 ENDPROC(aesni_cbc_dec)
14094
14095@@ -2550,6 +2568,7 @@ _aesni_inc_init:
14096 mov $1, TCTR_LOW
14097 MOVQ_R64_XMM TCTR_LOW INC
14098 MOVQ_R64_XMM CTR TCTR_LOW
14099+ pax_force_retaddr
14100 ret
14101 ENDPROC(_aesni_inc_init)
14102
14103@@ -2579,6 +2598,7 @@ _aesni_inc:
14104 .Linc_low:
14105 movaps CTR, IV
14106 PSHUFB_XMM BSWAP_MASK IV
14107+ pax_force_retaddr
14108 ret
14109 ENDPROC(_aesni_inc)
14110
14111@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
14112 .Lctr_enc_ret:
14113 movups IV, (IVP)
14114 .Lctr_enc_just_ret:
14115+ pax_force_retaddr
14116 ret
14117 ENDPROC(aesni_ctr_enc)
14118
14119@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
14120 pxor INC, STATE4
14121 movdqu STATE4, 0x70(OUTP)
14122
14123+ pax_force_retaddr
14124 ret
14125 ENDPROC(aesni_xts_crypt8)
14126
14127diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14128index 246c670..466e2d6 100644
14129--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
14130+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14131@@ -21,6 +21,7 @@
14132 */
14133
14134 #include <linux/linkage.h>
14135+#include <asm/alternative-asm.h>
14136
14137 .file "blowfish-x86_64-asm.S"
14138 .text
14139@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
14140 jnz .L__enc_xor;
14141
14142 write_block();
14143+ pax_force_retaddr
14144 ret;
14145 .L__enc_xor:
14146 xor_block();
14147+ pax_force_retaddr
14148 ret;
14149 ENDPROC(__blowfish_enc_blk)
14150
14151@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
14152
14153 movq %r11, %rbp;
14154
14155+ pax_force_retaddr
14156 ret;
14157 ENDPROC(blowfish_dec_blk)
14158
14159@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
14160
14161 popq %rbx;
14162 popq %rbp;
14163+ pax_force_retaddr
14164 ret;
14165
14166 .L__enc_xor4:
14167@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
14168
14169 popq %rbx;
14170 popq %rbp;
14171+ pax_force_retaddr
14172 ret;
14173 ENDPROC(__blowfish_enc_blk_4way)
14174
14175@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
14176 popq %rbx;
14177 popq %rbp;
14178
14179+ pax_force_retaddr
14180 ret;
14181 ENDPROC(blowfish_dec_blk_4way)
14182diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14183index ce71f92..1dce7ec 100644
14184--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14185+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14186@@ -16,6 +16,7 @@
14187 */
14188
14189 #include <linux/linkage.h>
14190+#include <asm/alternative-asm.h>
14191
14192 #define CAMELLIA_TABLE_BYTE_LEN 272
14193
14194@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14195 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
14196 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
14197 %rcx, (%r9));
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14201
14202@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14203 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
14204 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
14205 %rax, (%r9));
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14209
14210@@ -780,6 +783,7 @@ __camellia_enc_blk16:
14211 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14212 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
14213
14214+ pax_force_retaddr
14215 ret;
14216
14217 .align 8
14218@@ -865,6 +869,7 @@ __camellia_dec_blk16:
14219 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14220 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
14221
14222+ pax_force_retaddr
14223 ret;
14224
14225 .align 8
14226@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
14227 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14228 %xmm8, %rsi);
14229
14230+ pax_force_retaddr
14231 ret;
14232 ENDPROC(camellia_ecb_enc_16way)
14233
14234@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
14235 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14236 %xmm8, %rsi);
14237
14238+ pax_force_retaddr
14239 ret;
14240 ENDPROC(camellia_ecb_dec_16way)
14241
14242@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
14243 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14244 %xmm8, %rsi);
14245
14246+ pax_force_retaddr
14247 ret;
14248 ENDPROC(camellia_cbc_dec_16way)
14249
14250@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
14251 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14252 %xmm8, %rsi);
14253
14254+ pax_force_retaddr
14255 ret;
14256 ENDPROC(camellia_ctr_16way)
14257
14258@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
14259 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14260 %xmm8, %rsi);
14261
14262+ pax_force_retaddr
14263 ret;
14264 ENDPROC(camellia_xts_crypt_16way)
14265
14266diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14267index 0e0b886..5a3123c 100644
14268--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14269+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14270@@ -11,6 +11,7 @@
14271 */
14272
14273 #include <linux/linkage.h>
14274+#include <asm/alternative-asm.h>
14275
14276 #define CAMELLIA_TABLE_BYTE_LEN 272
14277
14278@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14279 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
14280 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
14281 %rcx, (%r9));
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14285
14286@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14287 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
14288 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
14289 %rax, (%r9));
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14293
14294@@ -820,6 +823,7 @@ __camellia_enc_blk32:
14295 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14296 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
14297
14298+ pax_force_retaddr
14299 ret;
14300
14301 .align 8
14302@@ -905,6 +909,7 @@ __camellia_dec_blk32:
14303 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14304 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
14305
14306+ pax_force_retaddr
14307 ret;
14308
14309 .align 8
14310@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
14311
14312 vzeroupper;
14313
14314+ pax_force_retaddr
14315 ret;
14316 ENDPROC(camellia_ecb_enc_32way)
14317
14318@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
14319
14320 vzeroupper;
14321
14322+ pax_force_retaddr
14323 ret;
14324 ENDPROC(camellia_ecb_dec_32way)
14325
14326@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
14327
14328 vzeroupper;
14329
14330+ pax_force_retaddr
14331 ret;
14332 ENDPROC(camellia_cbc_dec_32way)
14333
14334@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
14335
14336 vzeroupper;
14337
14338+ pax_force_retaddr
14339 ret;
14340 ENDPROC(camellia_ctr_32way)
14341
14342@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
14343
14344 vzeroupper;
14345
14346+ pax_force_retaddr
14347 ret;
14348 ENDPROC(camellia_xts_crypt_32way)
14349
14350diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
14351index 310319c..db3d7b5 100644
14352--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
14353+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
14354@@ -21,6 +21,7 @@
14355 */
14356
14357 #include <linux/linkage.h>
14358+#include <asm/alternative-asm.h>
14359
14360 .file "camellia-x86_64-asm_64.S"
14361 .text
14362@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
14363 enc_outunpack(mov, RT1);
14364
14365 movq RRBP, %rbp;
14366+ pax_force_retaddr
14367 ret;
14368
14369 .L__enc_xor:
14370 enc_outunpack(xor, RT1);
14371
14372 movq RRBP, %rbp;
14373+ pax_force_retaddr
14374 ret;
14375 ENDPROC(__camellia_enc_blk)
14376
14377@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
14378 dec_outunpack();
14379
14380 movq RRBP, %rbp;
14381+ pax_force_retaddr
14382 ret;
14383 ENDPROC(camellia_dec_blk)
14384
14385@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
14386
14387 movq RRBP, %rbp;
14388 popq %rbx;
14389+ pax_force_retaddr
14390 ret;
14391
14392 .L__enc2_xor:
14393@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
14394
14395 movq RRBP, %rbp;
14396 popq %rbx;
14397+ pax_force_retaddr
14398 ret;
14399 ENDPROC(__camellia_enc_blk_2way)
14400
14401@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
14402
14403 movq RRBP, %rbp;
14404 movq RXOR, %rbx;
14405+ pax_force_retaddr
14406 ret;
14407 ENDPROC(camellia_dec_blk_2way)
14408diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14409index c35fd5d..2d8c7db 100644
14410--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14411+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14412@@ -24,6 +24,7 @@
14413 */
14414
14415 #include <linux/linkage.h>
14416+#include <asm/alternative-asm.h>
14417
14418 .file "cast5-avx-x86_64-asm_64.S"
14419
14420@@ -281,6 +282,7 @@ __cast5_enc_blk16:
14421 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14422 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14423
14424+ pax_force_retaddr
14425 ret;
14426 ENDPROC(__cast5_enc_blk16)
14427
14428@@ -352,6 +354,7 @@ __cast5_dec_blk16:
14429 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14430 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14431
14432+ pax_force_retaddr
14433 ret;
14434
14435 .L__skip_dec:
14436@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
14437 vmovdqu RR4, (6*4*4)(%r11);
14438 vmovdqu RL4, (7*4*4)(%r11);
14439
14440+ pax_force_retaddr
14441 ret;
14442 ENDPROC(cast5_ecb_enc_16way)
14443
14444@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
14445 vmovdqu RR4, (6*4*4)(%r11);
14446 vmovdqu RL4, (7*4*4)(%r11);
14447
14448+ pax_force_retaddr
14449 ret;
14450 ENDPROC(cast5_ecb_dec_16way)
14451
14452@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14453 * %rdx: src
14454 */
14455
14456- pushq %r12;
14457+ pushq %r14;
14458
14459 movq %rsi, %r11;
14460- movq %rdx, %r12;
14461+ movq %rdx, %r14;
14462
14463 vmovdqu (0*16)(%rdx), RL1;
14464 vmovdqu (1*16)(%rdx), RR1;
14465@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14466 call __cast5_dec_blk16;
14467
14468 /* xor with src */
14469- vmovq (%r12), RX;
14470+ vmovq (%r14), RX;
14471 vpshufd $0x4f, RX, RX;
14472 vpxor RX, RR1, RR1;
14473- vpxor 0*16+8(%r12), RL1, RL1;
14474- vpxor 1*16+8(%r12), RR2, RR2;
14475- vpxor 2*16+8(%r12), RL2, RL2;
14476- vpxor 3*16+8(%r12), RR3, RR3;
14477- vpxor 4*16+8(%r12), RL3, RL3;
14478- vpxor 5*16+8(%r12), RR4, RR4;
14479- vpxor 6*16+8(%r12), RL4, RL4;
14480+ vpxor 0*16+8(%r14), RL1, RL1;
14481+ vpxor 1*16+8(%r14), RR2, RR2;
14482+ vpxor 2*16+8(%r14), RL2, RL2;
14483+ vpxor 3*16+8(%r14), RR3, RR3;
14484+ vpxor 4*16+8(%r14), RL3, RL3;
14485+ vpxor 5*16+8(%r14), RR4, RR4;
14486+ vpxor 6*16+8(%r14), RL4, RL4;
14487
14488 vmovdqu RR1, (0*16)(%r11);
14489 vmovdqu RL1, (1*16)(%r11);
14490@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14491 vmovdqu RR4, (6*16)(%r11);
14492 vmovdqu RL4, (7*16)(%r11);
14493
14494- popq %r12;
14495+ popq %r14;
14496
14497+ pax_force_retaddr
14498 ret;
14499 ENDPROC(cast5_cbc_dec_16way)
14500
14501@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14502 * %rcx: iv (big endian, 64bit)
14503 */
14504
14505- pushq %r12;
14506+ pushq %r14;
14507
14508 movq %rsi, %r11;
14509- movq %rdx, %r12;
14510+ movq %rdx, %r14;
14511
14512 vpcmpeqd RTMP, RTMP, RTMP;
14513 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14514@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14515 call __cast5_enc_blk16;
14516
14517 /* dst = src ^ iv */
14518- vpxor (0*16)(%r12), RR1, RR1;
14519- vpxor (1*16)(%r12), RL1, RL1;
14520- vpxor (2*16)(%r12), RR2, RR2;
14521- vpxor (3*16)(%r12), RL2, RL2;
14522- vpxor (4*16)(%r12), RR3, RR3;
14523- vpxor (5*16)(%r12), RL3, RL3;
14524- vpxor (6*16)(%r12), RR4, RR4;
14525- vpxor (7*16)(%r12), RL4, RL4;
14526+ vpxor (0*16)(%r14), RR1, RR1;
14527+ vpxor (1*16)(%r14), RL1, RL1;
14528+ vpxor (2*16)(%r14), RR2, RR2;
14529+ vpxor (3*16)(%r14), RL2, RL2;
14530+ vpxor (4*16)(%r14), RR3, RR3;
14531+ vpxor (5*16)(%r14), RL3, RL3;
14532+ vpxor (6*16)(%r14), RR4, RR4;
14533+ vpxor (7*16)(%r14), RL4, RL4;
14534 vmovdqu RR1, (0*16)(%r11);
14535 vmovdqu RL1, (1*16)(%r11);
14536 vmovdqu RR2, (2*16)(%r11);
14537@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14538 vmovdqu RR4, (6*16)(%r11);
14539 vmovdqu RL4, (7*16)(%r11);
14540
14541- popq %r12;
14542+ popq %r14;
14543
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(cast5_ctr_16way)
14547diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14548index e3531f8..e123f35 100644
14549--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14551@@ -24,6 +24,7 @@
14552 */
14553
14554 #include <linux/linkage.h>
14555+#include <asm/alternative-asm.h>
14556 #include "glue_helper-asm-avx.S"
14557
14558 .file "cast6-avx-x86_64-asm_64.S"
14559@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14560 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14561 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14562
14563+ pax_force_retaddr
14564 ret;
14565 ENDPROC(__cast6_enc_blk8)
14566
14567@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14568 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14569 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14570
14571+ pax_force_retaddr
14572 ret;
14573 ENDPROC(__cast6_dec_blk8)
14574
14575@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14576
14577 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14578
14579+ pax_force_retaddr
14580 ret;
14581 ENDPROC(cast6_ecb_enc_8way)
14582
14583@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14584
14585 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14586
14587+ pax_force_retaddr
14588 ret;
14589 ENDPROC(cast6_ecb_dec_8way)
14590
14591@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14592 * %rdx: src
14593 */
14594
14595- pushq %r12;
14596+ pushq %r14;
14597
14598 movq %rsi, %r11;
14599- movq %rdx, %r12;
14600+ movq %rdx, %r14;
14601
14602 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14603
14604 call __cast6_dec_blk8;
14605
14606- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14607+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14608
14609- popq %r12;
14610+ popq %r14;
14611
14612+ pax_force_retaddr
14613 ret;
14614 ENDPROC(cast6_cbc_dec_8way)
14615
14616@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14617 * %rcx: iv (little endian, 128bit)
14618 */
14619
14620- pushq %r12;
14621+ pushq %r14;
14622
14623 movq %rsi, %r11;
14624- movq %rdx, %r12;
14625+ movq %rdx, %r14;
14626
14627 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14628 RD2, RX, RKR, RKM);
14629
14630 call __cast6_enc_blk8;
14631
14632- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14633+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14634
14635- popq %r12;
14636+ popq %r14;
14637
14638+ pax_force_retaddr
14639 ret;
14640 ENDPROC(cast6_ctr_8way)
14641
14642@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14643 /* dst <= regs xor IVs(in dst) */
14644 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14645
14646+ pax_force_retaddr
14647 ret;
14648 ENDPROC(cast6_xts_enc_8way)
14649
14650@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14651 /* dst <= regs xor IVs(in dst) */
14652 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14653
14654+ pax_force_retaddr
14655 ret;
14656 ENDPROC(cast6_xts_dec_8way)
14657diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14658index 26d49eb..c0a8c84 100644
14659--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14660+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14661@@ -45,6 +45,7 @@
14662
14663 #include <asm/inst.h>
14664 #include <linux/linkage.h>
14665+#include <asm/alternative-asm.h>
14666
14667 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14668
14669@@ -309,6 +310,7 @@ do_return:
14670 popq %rsi
14671 popq %rdi
14672 popq %rbx
14673+ pax_force_retaddr
14674 ret
14675
14676 ################################################################
14677diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14678index 5d1e007..098cb4f 100644
14679--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14680+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14681@@ -18,6 +18,7 @@
14682
14683 #include <linux/linkage.h>
14684 #include <asm/inst.h>
14685+#include <asm/alternative-asm.h>
14686
14687 .data
14688
14689@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14690 psrlq $1, T2
14691 pxor T2, T1
14692 pxor T1, DATA
14693+ pax_force_retaddr
14694 ret
14695 ENDPROC(__clmul_gf128mul_ble)
14696
14697@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14698 call __clmul_gf128mul_ble
14699 PSHUFB_XMM BSWAP DATA
14700 movups DATA, (%rdi)
14701+ pax_force_retaddr
14702 ret
14703 ENDPROC(clmul_ghash_mul)
14704
14705@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14706 PSHUFB_XMM BSWAP DATA
14707 movups DATA, (%rdi)
14708 .Lupdate_just_ret:
14709+ pax_force_retaddr
14710 ret
14711 ENDPROC(clmul_ghash_update)
14712diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14713index 9279e0b..c4b3d2c 100644
14714--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14715+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14716@@ -1,4 +1,5 @@
14717 #include <linux/linkage.h>
14718+#include <asm/alternative-asm.h>
14719
14720 # enter salsa20_encrypt_bytes
14721 ENTRY(salsa20_encrypt_bytes)
14722@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14723 add %r11,%rsp
14724 mov %rdi,%rax
14725 mov %rsi,%rdx
14726+ pax_force_retaddr
14727 ret
14728 # bytesatleast65:
14729 ._bytesatleast65:
14730@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14731 add %r11,%rsp
14732 mov %rdi,%rax
14733 mov %rsi,%rdx
14734+ pax_force_retaddr
14735 ret
14736 ENDPROC(salsa20_keysetup)
14737
14738@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14739 add %r11,%rsp
14740 mov %rdi,%rax
14741 mov %rsi,%rdx
14742+ pax_force_retaddr
14743 ret
14744 ENDPROC(salsa20_ivsetup)
14745diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14746index 2f202f4..d9164d6 100644
14747--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14748+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14749@@ -24,6 +24,7 @@
14750 */
14751
14752 #include <linux/linkage.h>
14753+#include <asm/alternative-asm.h>
14754 #include "glue_helper-asm-avx.S"
14755
14756 .file "serpent-avx-x86_64-asm_64.S"
14757@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14758 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14759 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14760
14761+ pax_force_retaddr
14762 ret;
14763 ENDPROC(__serpent_enc_blk8_avx)
14764
14765@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14766 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14767 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14768
14769+ pax_force_retaddr
14770 ret;
14771 ENDPROC(__serpent_dec_blk8_avx)
14772
14773@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14774
14775 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14776
14777+ pax_force_retaddr
14778 ret;
14779 ENDPROC(serpent_ecb_enc_8way_avx)
14780
14781@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14782
14783 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14784
14785+ pax_force_retaddr
14786 ret;
14787 ENDPROC(serpent_ecb_dec_8way_avx)
14788
14789@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14790
14791 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14792
14793+ pax_force_retaddr
14794 ret;
14795 ENDPROC(serpent_cbc_dec_8way_avx)
14796
14797@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14798
14799 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14800
14801+ pax_force_retaddr
14802 ret;
14803 ENDPROC(serpent_ctr_8way_avx)
14804
14805@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14806 /* dst <= regs xor IVs(in dst) */
14807 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14808
14809+ pax_force_retaddr
14810 ret;
14811 ENDPROC(serpent_xts_enc_8way_avx)
14812
14813@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14814 /* dst <= regs xor IVs(in dst) */
14815 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14816
14817+ pax_force_retaddr
14818 ret;
14819 ENDPROC(serpent_xts_dec_8way_avx)
14820diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14821index b222085..abd483c 100644
14822--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14823+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14824@@ -15,6 +15,7 @@
14825 */
14826
14827 #include <linux/linkage.h>
14828+#include <asm/alternative-asm.h>
14829 #include "glue_helper-asm-avx2.S"
14830
14831 .file "serpent-avx2-asm_64.S"
14832@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14833 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14834 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14835
14836+ pax_force_retaddr
14837 ret;
14838 ENDPROC(__serpent_enc_blk16)
14839
14840@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14841 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14842 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14843
14844+ pax_force_retaddr
14845 ret;
14846 ENDPROC(__serpent_dec_blk16)
14847
14848@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14849
14850 vzeroupper;
14851
14852+ pax_force_retaddr
14853 ret;
14854 ENDPROC(serpent_ecb_enc_16way)
14855
14856@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14857
14858 vzeroupper;
14859
14860+ pax_force_retaddr
14861 ret;
14862 ENDPROC(serpent_ecb_dec_16way)
14863
14864@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14865
14866 vzeroupper;
14867
14868+ pax_force_retaddr
14869 ret;
14870 ENDPROC(serpent_cbc_dec_16way)
14871
14872@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14873
14874 vzeroupper;
14875
14876+ pax_force_retaddr
14877 ret;
14878 ENDPROC(serpent_ctr_16way)
14879
14880@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14881
14882 vzeroupper;
14883
14884+ pax_force_retaddr
14885 ret;
14886 ENDPROC(serpent_xts_enc_16way)
14887
14888@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14889
14890 vzeroupper;
14891
14892+ pax_force_retaddr
14893 ret;
14894 ENDPROC(serpent_xts_dec_16way)
14895diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14896index acc066c..1559cc4 100644
14897--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14898+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14899@@ -25,6 +25,7 @@
14900 */
14901
14902 #include <linux/linkage.h>
14903+#include <asm/alternative-asm.h>
14904
14905 .file "serpent-sse2-x86_64-asm_64.S"
14906 .text
14907@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14908 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14909 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14910
14911+ pax_force_retaddr
14912 ret;
14913
14914 .L__enc_xor8:
14915 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14916 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14917
14918+ pax_force_retaddr
14919 ret;
14920 ENDPROC(__serpent_enc_blk_8way)
14921
14922@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14923 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14924 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14925
14926+ pax_force_retaddr
14927 ret;
14928 ENDPROC(serpent_dec_blk_8way)
14929diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14930index a410950..9dfe7ad 100644
14931--- a/arch/x86/crypto/sha1_ssse3_asm.S
14932+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14933@@ -29,6 +29,7 @@
14934 */
14935
14936 #include <linux/linkage.h>
14937+#include <asm/alternative-asm.h>
14938
14939 #define CTX %rdi // arg1
14940 #define BUF %rsi // arg2
14941@@ -75,9 +76,9 @@
14942
14943 push %rbx
14944 push %rbp
14945- push %r12
14946+ push %r14
14947
14948- mov %rsp, %r12
14949+ mov %rsp, %r14
14950 sub $64, %rsp # allocate workspace
14951 and $~15, %rsp # align stack
14952
14953@@ -99,11 +100,12 @@
14954 xor %rax, %rax
14955 rep stosq
14956
14957- mov %r12, %rsp # deallocate workspace
14958+ mov %r14, %rsp # deallocate workspace
14959
14960- pop %r12
14961+ pop %r14
14962 pop %rbp
14963 pop %rbx
14964+ pax_force_retaddr
14965 ret
14966
14967 ENDPROC(\name)
14968diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14969index 642f156..51a513c 100644
14970--- a/arch/x86/crypto/sha256-avx-asm.S
14971+++ b/arch/x86/crypto/sha256-avx-asm.S
14972@@ -49,6 +49,7 @@
14973
14974 #ifdef CONFIG_AS_AVX
14975 #include <linux/linkage.h>
14976+#include <asm/alternative-asm.h>
14977
14978 ## assume buffers not aligned
14979 #define VMOVDQ vmovdqu
14980@@ -460,6 +461,7 @@ done_hash:
14981 popq %r13
14982 popq %rbp
14983 popq %rbx
14984+ pax_force_retaddr
14985 ret
14986 ENDPROC(sha256_transform_avx)
14987
14988diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14989index 9e86944..3795e6a 100644
14990--- a/arch/x86/crypto/sha256-avx2-asm.S
14991+++ b/arch/x86/crypto/sha256-avx2-asm.S
14992@@ -50,6 +50,7 @@
14993
14994 #ifdef CONFIG_AS_AVX2
14995 #include <linux/linkage.h>
14996+#include <asm/alternative-asm.h>
14997
14998 ## assume buffers not aligned
14999 #define VMOVDQ vmovdqu
15000@@ -720,6 +721,7 @@ done_hash:
15001 popq %r12
15002 popq %rbp
15003 popq %rbx
15004+ pax_force_retaddr
15005 ret
15006 ENDPROC(sha256_transform_rorx)
15007
15008diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
15009index f833b74..8c62a9e 100644
15010--- a/arch/x86/crypto/sha256-ssse3-asm.S
15011+++ b/arch/x86/crypto/sha256-ssse3-asm.S
15012@@ -47,6 +47,7 @@
15013 ########################################################################
15014
15015 #include <linux/linkage.h>
15016+#include <asm/alternative-asm.h>
15017
15018 ## assume buffers not aligned
15019 #define MOVDQ movdqu
15020@@ -471,6 +472,7 @@ done_hash:
15021 popq %rbp
15022 popq %rbx
15023
15024+ pax_force_retaddr
15025 ret
15026 ENDPROC(sha256_transform_ssse3)
15027
15028diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
15029index 974dde9..a823ff9 100644
15030--- a/arch/x86/crypto/sha512-avx-asm.S
15031+++ b/arch/x86/crypto/sha512-avx-asm.S
15032@@ -49,6 +49,7 @@
15033
15034 #ifdef CONFIG_AS_AVX
15035 #include <linux/linkage.h>
15036+#include <asm/alternative-asm.h>
15037
15038 .text
15039
15040@@ -364,6 +365,7 @@ updateblock:
15041 mov frame_RSPSAVE(%rsp), %rsp
15042
15043 nowork:
15044+ pax_force_retaddr
15045 ret
15046 ENDPROC(sha512_transform_avx)
15047
15048diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
15049index 568b961..ed20c37 100644
15050--- a/arch/x86/crypto/sha512-avx2-asm.S
15051+++ b/arch/x86/crypto/sha512-avx2-asm.S
15052@@ -51,6 +51,7 @@
15053
15054 #ifdef CONFIG_AS_AVX2
15055 #include <linux/linkage.h>
15056+#include <asm/alternative-asm.h>
15057
15058 .text
15059
15060@@ -678,6 +679,7 @@ done_hash:
15061
15062 # Restore Stack Pointer
15063 mov frame_RSPSAVE(%rsp), %rsp
15064+ pax_force_retaddr
15065 ret
15066 ENDPROC(sha512_transform_rorx)
15067
15068diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
15069index fb56855..6edd768 100644
15070--- a/arch/x86/crypto/sha512-ssse3-asm.S
15071+++ b/arch/x86/crypto/sha512-ssse3-asm.S
15072@@ -48,6 +48,7 @@
15073 ########################################################################
15074
15075 #include <linux/linkage.h>
15076+#include <asm/alternative-asm.h>
15077
15078 .text
15079
15080@@ -363,6 +364,7 @@ updateblock:
15081 mov frame_RSPSAVE(%rsp), %rsp
15082
15083 nowork:
15084+ pax_force_retaddr
15085 ret
15086 ENDPROC(sha512_transform_ssse3)
15087
15088diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15089index 0505813..b067311 100644
15090--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15091+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15092@@ -24,6 +24,7 @@
15093 */
15094
15095 #include <linux/linkage.h>
15096+#include <asm/alternative-asm.h>
15097 #include "glue_helper-asm-avx.S"
15098
15099 .file "twofish-avx-x86_64-asm_64.S"
15100@@ -284,6 +285,7 @@ __twofish_enc_blk8:
15101 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
15102 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
15103
15104+ pax_force_retaddr
15105 ret;
15106 ENDPROC(__twofish_enc_blk8)
15107
15108@@ -324,6 +326,7 @@ __twofish_dec_blk8:
15109 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
15110 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
15111
15112+ pax_force_retaddr
15113 ret;
15114 ENDPROC(__twofish_dec_blk8)
15115
15116@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
15117
15118 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15119
15120+ pax_force_retaddr
15121 ret;
15122 ENDPROC(twofish_ecb_enc_8way)
15123
15124@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
15125
15126 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15127
15128+ pax_force_retaddr
15129 ret;
15130 ENDPROC(twofish_ecb_dec_8way)
15131
15132@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
15133 * %rdx: src
15134 */
15135
15136- pushq %r12;
15137+ pushq %r14;
15138
15139 movq %rsi, %r11;
15140- movq %rdx, %r12;
15141+ movq %rdx, %r14;
15142
15143 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15144
15145 call __twofish_dec_blk8;
15146
15147- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15148+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15149
15150- popq %r12;
15151+ popq %r14;
15152
15153+ pax_force_retaddr
15154 ret;
15155 ENDPROC(twofish_cbc_dec_8way)
15156
15157@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
15158 * %rcx: iv (little endian, 128bit)
15159 */
15160
15161- pushq %r12;
15162+ pushq %r14;
15163
15164 movq %rsi, %r11;
15165- movq %rdx, %r12;
15166+ movq %rdx, %r14;
15167
15168 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
15169 RD2, RX0, RX1, RY0);
15170
15171 call __twofish_enc_blk8;
15172
15173- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15174+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15175
15176- popq %r12;
15177+ popq %r14;
15178
15179+ pax_force_retaddr
15180 ret;
15181 ENDPROC(twofish_ctr_8way)
15182
15183@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
15184 /* dst <= regs xor IVs(in dst) */
15185 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15186
15187+ pax_force_retaddr
15188 ret;
15189 ENDPROC(twofish_xts_enc_8way)
15190
15191@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
15192 /* dst <= regs xor IVs(in dst) */
15193 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15194
15195+ pax_force_retaddr
15196 ret;
15197 ENDPROC(twofish_xts_dec_8way)
15198diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15199index 1c3b7ce..02f578d 100644
15200--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15201+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15202@@ -21,6 +21,7 @@
15203 */
15204
15205 #include <linux/linkage.h>
15206+#include <asm/alternative-asm.h>
15207
15208 .file "twofish-x86_64-asm-3way.S"
15209 .text
15210@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
15211 popq %r13;
15212 popq %r14;
15213 popq %r15;
15214+ pax_force_retaddr
15215 ret;
15216
15217 .L__enc_xor3:
15218@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
15219 popq %r13;
15220 popq %r14;
15221 popq %r15;
15222+ pax_force_retaddr
15223 ret;
15224 ENDPROC(__twofish_enc_blk_3way)
15225
15226@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
15227 popq %r13;
15228 popq %r14;
15229 popq %r15;
15230+ pax_force_retaddr
15231 ret;
15232 ENDPROC(twofish_dec_blk_3way)
15233diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
15234index a039d21..524b8b2 100644
15235--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
15236+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
15237@@ -22,6 +22,7 @@
15238
15239 #include <linux/linkage.h>
15240 #include <asm/asm-offsets.h>
15241+#include <asm/alternative-asm.h>
15242
15243 #define a_offset 0
15244 #define b_offset 4
15245@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
15246
15247 popq R1
15248 movq $1,%rax
15249+ pax_force_retaddr
15250 ret
15251 ENDPROC(twofish_enc_blk)
15252
15253@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
15254
15255 popq R1
15256 movq $1,%rax
15257+ pax_force_retaddr
15258 ret
15259 ENDPROC(twofish_dec_blk)
15260diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
15261index d21ff89..6da8e6e 100644
15262--- a/arch/x86/ia32/ia32_aout.c
15263+++ b/arch/x86/ia32/ia32_aout.c
15264@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
15265 unsigned long dump_start, dump_size;
15266 struct user32 dump;
15267
15268+ memset(&dump, 0, sizeof(dump));
15269+
15270 fs = get_fs();
15271 set_fs(KERNEL_DS);
15272 has_dumped = 1;
15273diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
15274index f9e181a..db313b5 100644
15275--- a/arch/x86/ia32/ia32_signal.c
15276+++ b/arch/x86/ia32/ia32_signal.c
15277@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
15278 if (__get_user(set.sig[0], &frame->sc.oldmask)
15279 || (_COMPAT_NSIG_WORDS > 1
15280 && __copy_from_user((((char *) &set.sig) + 4),
15281- &frame->extramask,
15282+ frame->extramask,
15283 sizeof(frame->extramask))))
15284 goto badframe;
15285
15286@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
15287 sp -= frame_size;
15288 /* Align the stack pointer according to the i386 ABI,
15289 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
15290- sp = ((sp + 4) & -16ul) - 4;
15291+ sp = ((sp - 12) & -16ul) - 4;
15292 return (void __user *) sp;
15293 }
15294
15295@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15296 } else {
15297 /* Return stub is in 32bit vsyscall page */
15298 if (current->mm->context.vdso)
15299- restorer = current->mm->context.vdso +
15300- selected_vdso32->sym___kernel_sigreturn;
15301+ restorer = (void __force_user *)(current->mm->context.vdso +
15302+ selected_vdso32->sym___kernel_sigreturn);
15303 else
15304- restorer = &frame->retcode;
15305+ restorer = frame->retcode;
15306 }
15307
15308 put_user_try {
15309@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15310 * These are actually not used anymore, but left because some
15311 * gdb versions depend on them as a marker.
15312 */
15313- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15314+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15315 } put_user_catch(err);
15316
15317 if (err)
15318@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15319 0xb8,
15320 __NR_ia32_rt_sigreturn,
15321 0x80cd,
15322- 0,
15323+ 0
15324 };
15325
15326 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
15327@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15328
15329 if (ksig->ka.sa.sa_flags & SA_RESTORER)
15330 restorer = ksig->ka.sa.sa_restorer;
15331- else
15332+ else if (current->mm->context.vdso)
15333+ /* Return stub is in 32bit vsyscall page */
15334 restorer = current->mm->context.vdso +
15335 selected_vdso32->sym___kernel_rt_sigreturn;
15336+ else
15337+ restorer = frame->retcode;
15338 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
15339
15340 /*
15341 * Not actually used anymore, but left because some gdb
15342 * versions need it.
15343 */
15344- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15345+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15346 } put_user_catch(err);
15347
15348 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
15349diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
15350index 4299eb0..fefe70e 100644
15351--- a/arch/x86/ia32/ia32entry.S
15352+++ b/arch/x86/ia32/ia32entry.S
15353@@ -15,8 +15,10 @@
15354 #include <asm/irqflags.h>
15355 #include <asm/asm.h>
15356 #include <asm/smap.h>
15357+#include <asm/pgtable.h>
15358 #include <linux/linkage.h>
15359 #include <linux/err.h>
15360+#include <asm/alternative-asm.h>
15361
15362 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15363 #include <linux/elf-em.h>
15364@@ -62,12 +64,12 @@
15365 */
15366 .macro LOAD_ARGS32 offset, _r9=0
15367 .if \_r9
15368- movl \offset+16(%rsp),%r9d
15369+ movl \offset+R9(%rsp),%r9d
15370 .endif
15371- movl \offset+40(%rsp),%ecx
15372- movl \offset+48(%rsp),%edx
15373- movl \offset+56(%rsp),%esi
15374- movl \offset+64(%rsp),%edi
15375+ movl \offset+RCX(%rsp),%ecx
15376+ movl \offset+RDX(%rsp),%edx
15377+ movl \offset+RSI(%rsp),%esi
15378+ movl \offset+RDI(%rsp),%edi
15379 movl %eax,%eax /* zero extension */
15380 .endm
15381
15382@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
15383 ENDPROC(native_irq_enable_sysexit)
15384 #endif
15385
15386+ .macro pax_enter_kernel_user
15387+ pax_set_fptr_mask
15388+#ifdef CONFIG_PAX_MEMORY_UDEREF
15389+ call pax_enter_kernel_user
15390+#endif
15391+ .endm
15392+
15393+ .macro pax_exit_kernel_user
15394+#ifdef CONFIG_PAX_MEMORY_UDEREF
15395+ call pax_exit_kernel_user
15396+#endif
15397+#ifdef CONFIG_PAX_RANDKSTACK
15398+ pushq %rax
15399+ pushq %r11
15400+ call pax_randomize_kstack
15401+ popq %r11
15402+ popq %rax
15403+#endif
15404+ .endm
15405+
15406+ .macro pax_erase_kstack
15407+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15408+ call pax_erase_kstack
15409+#endif
15410+ .endm
15411+
15412 /*
15413 * 32bit SYSENTER instruction entry.
15414 *
15415@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
15416 CFI_REGISTER rsp,rbp
15417 SWAPGS_UNSAFE_STACK
15418 movq PER_CPU_VAR(kernel_stack), %rsp
15419- addq $(KERNEL_STACK_OFFSET),%rsp
15420- /*
15421- * No need to follow this irqs on/off section: the syscall
15422- * disabled irqs, here we enable it straight after entry:
15423- */
15424- ENABLE_INTERRUPTS(CLBR_NONE)
15425 movl %ebp,%ebp /* zero extension */
15426 pushq_cfi $__USER32_DS
15427 /*CFI_REL_OFFSET ss,0*/
15428@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
15429 CFI_REL_OFFSET rsp,0
15430 pushfq_cfi
15431 /*CFI_REL_OFFSET rflags,0*/
15432- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
15433- CFI_REGISTER rip,r10
15434+ orl $X86_EFLAGS_IF,(%rsp)
15435+ GET_THREAD_INFO(%r11)
15436+ movl TI_sysenter_return(%r11), %r11d
15437+ CFI_REGISTER rip,r11
15438 pushq_cfi $__USER32_CS
15439 /*CFI_REL_OFFSET cs,0*/
15440 movl %eax, %eax
15441- pushq_cfi %r10
15442+ pushq_cfi %r11
15443 CFI_REL_OFFSET rip,0
15444 pushq_cfi %rax
15445 cld
15446 SAVE_ARGS 0,1,0
15447+ pax_enter_kernel_user
15448+
15449+#ifdef CONFIG_PAX_RANDKSTACK
15450+ pax_erase_kstack
15451+#endif
15452+
15453+ /*
15454+ * No need to follow this irqs on/off section: the syscall
15455+ * disabled irqs, here we enable it straight after entry:
15456+ */
15457+ ENABLE_INTERRUPTS(CLBR_NONE)
15458 /* no need to do an access_ok check here because rbp has been
15459 32bit zero extended */
15460+
15461+#ifdef CONFIG_PAX_MEMORY_UDEREF
15462+ addq pax_user_shadow_base,%rbp
15463+ ASM_PAX_OPEN_USERLAND
15464+#endif
15465+
15466 ASM_STAC
15467 1: movl (%rbp),%ebp
15468 _ASM_EXTABLE(1b,ia32_badarg)
15469 ASM_CLAC
15470- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15471- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15472+
15473+#ifdef CONFIG_PAX_MEMORY_UDEREF
15474+ ASM_PAX_CLOSE_USERLAND
15475+#endif
15476+
15477+ GET_THREAD_INFO(%r11)
15478+ orl $TS_COMPAT,TI_status(%r11)
15479+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15480 CFI_REMEMBER_STATE
15481 jnz sysenter_tracesys
15482 cmpq $(IA32_NR_syscalls-1),%rax
15483@@ -162,15 +209,18 @@ sysenter_do_call:
15484 sysenter_dispatch:
15485 call *ia32_sys_call_table(,%rax,8)
15486 movq %rax,RAX-ARGOFFSET(%rsp)
15487+ GET_THREAD_INFO(%r11)
15488 DISABLE_INTERRUPTS(CLBR_NONE)
15489 TRACE_IRQS_OFF
15490- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15491+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15492 jnz sysexit_audit
15493 sysexit_from_sys_call:
15494- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15495+ pax_exit_kernel_user
15496+ pax_erase_kstack
15497+ andl $~TS_COMPAT,TI_status(%r11)
15498 /* clear IF, that popfq doesn't enable interrupts early */
15499- andl $~0x200,EFLAGS-R11(%rsp)
15500- movl RIP-R11(%rsp),%edx /* User %eip */
15501+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15502+ movl RIP(%rsp),%edx /* User %eip */
15503 CFI_REGISTER rip,rdx
15504 RESTORE_ARGS 0,24,0,0,0,0
15505 xorq %r8,%r8
15506@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15507 movl %eax,%esi /* 2nd arg: syscall number */
15508 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15509 call __audit_syscall_entry
15510+
15511+ pax_erase_kstack
15512+
15513 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15514 cmpq $(IA32_NR_syscalls-1),%rax
15515 ja ia32_badsys
15516@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15517 .endm
15518
15519 .macro auditsys_exit exit
15520- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15521+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15522 jnz ia32_ret_from_sys_call
15523 TRACE_IRQS_ON
15524 ENABLE_INTERRUPTS(CLBR_NONE)
15525@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15526 1: setbe %al /* 1 if error, 0 if not */
15527 movzbl %al,%edi /* zero-extend that into %edi */
15528 call __audit_syscall_exit
15529+ GET_THREAD_INFO(%r11)
15530 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15531 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15532 DISABLE_INTERRUPTS(CLBR_NONE)
15533 TRACE_IRQS_OFF
15534- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15535+ testl %edi,TI_flags(%r11)
15536 jz \exit
15537 CLEAR_RREGS -ARGOFFSET
15538 jmp int_with_check
15539@@ -237,7 +291,7 @@ sysexit_audit:
15540
15541 sysenter_tracesys:
15542 #ifdef CONFIG_AUDITSYSCALL
15543- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15544+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15545 jz sysenter_auditsys
15546 #endif
15547 SAVE_REST
15548@@ -249,6 +303,9 @@ sysenter_tracesys:
15549 RESTORE_REST
15550 cmpq $(IA32_NR_syscalls-1),%rax
15551 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15552+
15553+ pax_erase_kstack
15554+
15555 jmp sysenter_do_call
15556 CFI_ENDPROC
15557 ENDPROC(ia32_sysenter_target)
15558@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15559 ENTRY(ia32_cstar_target)
15560 CFI_STARTPROC32 simple
15561 CFI_SIGNAL_FRAME
15562- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15563+ CFI_DEF_CFA rsp,0
15564 CFI_REGISTER rip,rcx
15565 /*CFI_REGISTER rflags,r11*/
15566 SWAPGS_UNSAFE_STACK
15567 movl %esp,%r8d
15568 CFI_REGISTER rsp,r8
15569 movq PER_CPU_VAR(kernel_stack),%rsp
15570+ SAVE_ARGS 8*6,0,0
15571+ pax_enter_kernel_user
15572+
15573+#ifdef CONFIG_PAX_RANDKSTACK
15574+ pax_erase_kstack
15575+#endif
15576+
15577 /*
15578 * No need to follow this irqs on/off section: the syscall
15579 * disabled irqs and here we enable it straight after entry:
15580 */
15581 ENABLE_INTERRUPTS(CLBR_NONE)
15582- SAVE_ARGS 8,0,0
15583 movl %eax,%eax /* zero extension */
15584 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15585 movq %rcx,RIP-ARGOFFSET(%rsp)
15586@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15587 /* no need to do an access_ok check here because r8 has been
15588 32bit zero extended */
15589 /* hardware stack frame is complete now */
15590+
15591+#ifdef CONFIG_PAX_MEMORY_UDEREF
15592+ ASM_PAX_OPEN_USERLAND
15593+ movq pax_user_shadow_base,%r8
15594+ addq RSP-ARGOFFSET(%rsp),%r8
15595+#endif
15596+
15597 ASM_STAC
15598 1: movl (%r8),%r9d
15599 _ASM_EXTABLE(1b,ia32_badarg)
15600 ASM_CLAC
15601- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15602- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15603+
15604+#ifdef CONFIG_PAX_MEMORY_UDEREF
15605+ ASM_PAX_CLOSE_USERLAND
15606+#endif
15607+
15608+ GET_THREAD_INFO(%r11)
15609+ orl $TS_COMPAT,TI_status(%r11)
15610+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15611 CFI_REMEMBER_STATE
15612 jnz cstar_tracesys
15613 cmpq $IA32_NR_syscalls-1,%rax
15614@@ -319,13 +395,16 @@ cstar_do_call:
15615 cstar_dispatch:
15616 call *ia32_sys_call_table(,%rax,8)
15617 movq %rax,RAX-ARGOFFSET(%rsp)
15618+ GET_THREAD_INFO(%r11)
15619 DISABLE_INTERRUPTS(CLBR_NONE)
15620 TRACE_IRQS_OFF
15621- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15622+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15623 jnz sysretl_audit
15624 sysretl_from_sys_call:
15625- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15626- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15627+ pax_exit_kernel_user
15628+ pax_erase_kstack
15629+ andl $~TS_COMPAT,TI_status(%r11)
15630+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15631 movl RIP-ARGOFFSET(%rsp),%ecx
15632 CFI_REGISTER rip,rcx
15633 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15634@@ -352,7 +431,7 @@ sysretl_audit:
15635
15636 cstar_tracesys:
15637 #ifdef CONFIG_AUDITSYSCALL
15638- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15639+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15640 jz cstar_auditsys
15641 #endif
15642 xchgl %r9d,%ebp
15643@@ -366,11 +445,19 @@ cstar_tracesys:
15644 xchgl %ebp,%r9d
15645 cmpq $(IA32_NR_syscalls-1),%rax
15646 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15647+
15648+ pax_erase_kstack
15649+
15650 jmp cstar_do_call
15651 END(ia32_cstar_target)
15652
15653 ia32_badarg:
15654 ASM_CLAC
15655+
15656+#ifdef CONFIG_PAX_MEMORY_UDEREF
15657+ ASM_PAX_CLOSE_USERLAND
15658+#endif
15659+
15660 movq $-EFAULT,%rax
15661 jmp ia32_sysret
15662 CFI_ENDPROC
15663@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15664 CFI_REL_OFFSET rip,RIP-RIP
15665 PARAVIRT_ADJUST_EXCEPTION_FRAME
15666 SWAPGS
15667- /*
15668- * No need to follow this irqs on/off section: the syscall
15669- * disabled irqs and here we enable it straight after entry:
15670- */
15671- ENABLE_INTERRUPTS(CLBR_NONE)
15672 movl %eax,%eax
15673 pushq_cfi %rax
15674 cld
15675 /* note the registers are not zero extended to the sf.
15676 this could be a problem. */
15677 SAVE_ARGS 0,1,0
15678- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15679- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15680+ pax_enter_kernel_user
15681+
15682+#ifdef CONFIG_PAX_RANDKSTACK
15683+ pax_erase_kstack
15684+#endif
15685+
15686+ /*
15687+ * No need to follow this irqs on/off section: the syscall
15688+ * disabled irqs and here we enable it straight after entry:
15689+ */
15690+ ENABLE_INTERRUPTS(CLBR_NONE)
15691+ GET_THREAD_INFO(%r11)
15692+ orl $TS_COMPAT,TI_status(%r11)
15693+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15694 jnz ia32_tracesys
15695 cmpq $(IA32_NR_syscalls-1),%rax
15696 ja ia32_badsys
15697@@ -442,6 +536,9 @@ ia32_tracesys:
15698 RESTORE_REST
15699 cmpq $(IA32_NR_syscalls-1),%rax
15700 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15701+
15702+ pax_erase_kstack
15703+
15704 jmp ia32_do_call
15705 END(ia32_syscall)
15706
15707diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15708index 8e0ceec..af13504 100644
15709--- a/arch/x86/ia32/sys_ia32.c
15710+++ b/arch/x86/ia32/sys_ia32.c
15711@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15712 */
15713 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15714 {
15715- typeof(ubuf->st_uid) uid = 0;
15716- typeof(ubuf->st_gid) gid = 0;
15717+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15718+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15719 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15720 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15721 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15722diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15723index 372231c..51b537d 100644
15724--- a/arch/x86/include/asm/alternative-asm.h
15725+++ b/arch/x86/include/asm/alternative-asm.h
15726@@ -18,6 +18,45 @@
15727 .endm
15728 #endif
15729
15730+#ifdef KERNEXEC_PLUGIN
15731+ .macro pax_force_retaddr_bts rip=0
15732+ btsq $63,\rip(%rsp)
15733+ .endm
15734+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15735+ .macro pax_force_retaddr rip=0, reload=0
15736+ btsq $63,\rip(%rsp)
15737+ .endm
15738+ .macro pax_force_fptr ptr
15739+ btsq $63,\ptr
15740+ .endm
15741+ .macro pax_set_fptr_mask
15742+ .endm
15743+#endif
15744+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15745+ .macro pax_force_retaddr rip=0, reload=0
15746+ .if \reload
15747+ pax_set_fptr_mask
15748+ .endif
15749+ orq %r12,\rip(%rsp)
15750+ .endm
15751+ .macro pax_force_fptr ptr
15752+ orq %r12,\ptr
15753+ .endm
15754+ .macro pax_set_fptr_mask
15755+ movabs $0x8000000000000000,%r12
15756+ .endm
15757+#endif
15758+#else
15759+ .macro pax_force_retaddr rip=0, reload=0
15760+ .endm
15761+ .macro pax_force_fptr ptr
15762+ .endm
15763+ .macro pax_force_retaddr_bts rip=0
15764+ .endm
15765+ .macro pax_set_fptr_mask
15766+ .endm
15767+#endif
15768+
15769 .macro altinstruction_entry orig alt feature orig_len alt_len
15770 .long \orig - .
15771 .long \alt - .
15772diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15773index 473bdbe..b1e3377 100644
15774--- a/arch/x86/include/asm/alternative.h
15775+++ b/arch/x86/include/asm/alternative.h
15776@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15777 ".pushsection .discard,\"aw\",@progbits\n" \
15778 DISCARD_ENTRY(1) \
15779 ".popsection\n" \
15780- ".pushsection .altinstr_replacement, \"ax\"\n" \
15781+ ".pushsection .altinstr_replacement, \"a\"\n" \
15782 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15783 ".popsection"
15784
15785@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15786 DISCARD_ENTRY(1) \
15787 DISCARD_ENTRY(2) \
15788 ".popsection\n" \
15789- ".pushsection .altinstr_replacement, \"ax\"\n" \
15790+ ".pushsection .altinstr_replacement, \"a\"\n" \
15791 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15792 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15793 ".popsection"
15794diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15795index 465b309..ab7e51f 100644
15796--- a/arch/x86/include/asm/apic.h
15797+++ b/arch/x86/include/asm/apic.h
15798@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15799
15800 #ifdef CONFIG_X86_LOCAL_APIC
15801
15802-extern unsigned int apic_verbosity;
15803+extern int apic_verbosity;
15804 extern int local_apic_timer_c2_ok;
15805
15806 extern int disable_apic;
15807diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15808index 20370c6..a2eb9b0 100644
15809--- a/arch/x86/include/asm/apm.h
15810+++ b/arch/x86/include/asm/apm.h
15811@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15812 __asm__ __volatile__(APM_DO_ZERO_SEGS
15813 "pushl %%edi\n\t"
15814 "pushl %%ebp\n\t"
15815- "lcall *%%cs:apm_bios_entry\n\t"
15816+ "lcall *%%ss:apm_bios_entry\n\t"
15817 "setc %%al\n\t"
15818 "popl %%ebp\n\t"
15819 "popl %%edi\n\t"
15820@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15821 __asm__ __volatile__(APM_DO_ZERO_SEGS
15822 "pushl %%edi\n\t"
15823 "pushl %%ebp\n\t"
15824- "lcall *%%cs:apm_bios_entry\n\t"
15825+ "lcall *%%ss:apm_bios_entry\n\t"
15826 "setc %%bl\n\t"
15827 "popl %%ebp\n\t"
15828 "popl %%edi\n\t"
15829diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15830index 6dd1c7dd..2edd216 100644
15831--- a/arch/x86/include/asm/atomic.h
15832+++ b/arch/x86/include/asm/atomic.h
15833@@ -24,7 +24,18 @@
15834 */
15835 static inline int atomic_read(const atomic_t *v)
15836 {
15837- return (*(volatile int *)&(v)->counter);
15838+ return (*(volatile const int *)&(v)->counter);
15839+}
15840+
15841+/**
15842+ * atomic_read_unchecked - read atomic variable
15843+ * @v: pointer of type atomic_unchecked_t
15844+ *
15845+ * Atomically reads the value of @v.
15846+ */
15847+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15848+{
15849+ return (*(volatile const int *)&(v)->counter);
15850 }
15851
15852 /**
15853@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15854 }
15855
15856 /**
15857+ * atomic_set_unchecked - set atomic variable
15858+ * @v: pointer of type atomic_unchecked_t
15859+ * @i: required value
15860+ *
15861+ * Atomically sets the value of @v to @i.
15862+ */
15863+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15864+{
15865+ v->counter = i;
15866+}
15867+
15868+/**
15869 * atomic_add - add integer to atomic variable
15870 * @i: integer value to add
15871 * @v: pointer of type atomic_t
15872@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15873 */
15874 static inline void atomic_add(int i, atomic_t *v)
15875 {
15876- asm volatile(LOCK_PREFIX "addl %1,%0"
15877+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15878+
15879+#ifdef CONFIG_PAX_REFCOUNT
15880+ "jno 0f\n"
15881+ LOCK_PREFIX "subl %1,%0\n"
15882+ "int $4\n0:\n"
15883+ _ASM_EXTABLE(0b, 0b)
15884+#endif
15885+
15886+ : "+m" (v->counter)
15887+ : "ir" (i));
15888+}
15889+
15890+/**
15891+ * atomic_add_unchecked - add integer to atomic variable
15892+ * @i: integer value to add
15893+ * @v: pointer of type atomic_unchecked_t
15894+ *
15895+ * Atomically adds @i to @v.
15896+ */
15897+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15898+{
15899+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15900 : "+m" (v->counter)
15901 : "ir" (i));
15902 }
15903@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15904 */
15905 static inline void atomic_sub(int i, atomic_t *v)
15906 {
15907- asm volatile(LOCK_PREFIX "subl %1,%0"
15908+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15909+
15910+#ifdef CONFIG_PAX_REFCOUNT
15911+ "jno 0f\n"
15912+ LOCK_PREFIX "addl %1,%0\n"
15913+ "int $4\n0:\n"
15914+ _ASM_EXTABLE(0b, 0b)
15915+#endif
15916+
15917+ : "+m" (v->counter)
15918+ : "ir" (i));
15919+}
15920+
15921+/**
15922+ * atomic_sub_unchecked - subtract integer from atomic variable
15923+ * @i: integer value to subtract
15924+ * @v: pointer of type atomic_unchecked_t
15925+ *
15926+ * Atomically subtracts @i from @v.
15927+ */
15928+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15929+{
15930+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15931 : "+m" (v->counter)
15932 : "ir" (i));
15933 }
15934@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15935 */
15936 static inline int atomic_sub_and_test(int i, atomic_t *v)
15937 {
15938- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15939+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15940 }
15941
15942 /**
15943@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15944 */
15945 static inline void atomic_inc(atomic_t *v)
15946 {
15947- asm volatile(LOCK_PREFIX "incl %0"
15948+ asm volatile(LOCK_PREFIX "incl %0\n"
15949+
15950+#ifdef CONFIG_PAX_REFCOUNT
15951+ "jno 0f\n"
15952+ LOCK_PREFIX "decl %0\n"
15953+ "int $4\n0:\n"
15954+ _ASM_EXTABLE(0b, 0b)
15955+#endif
15956+
15957+ : "+m" (v->counter));
15958+}
15959+
15960+/**
15961+ * atomic_inc_unchecked - increment atomic variable
15962+ * @v: pointer of type atomic_unchecked_t
15963+ *
15964+ * Atomically increments @v by 1.
15965+ */
15966+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15967+{
15968+ asm volatile(LOCK_PREFIX "incl %0\n"
15969 : "+m" (v->counter));
15970 }
15971
15972@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15973 */
15974 static inline void atomic_dec(atomic_t *v)
15975 {
15976- asm volatile(LOCK_PREFIX "decl %0"
15977+ asm volatile(LOCK_PREFIX "decl %0\n"
15978+
15979+#ifdef CONFIG_PAX_REFCOUNT
15980+ "jno 0f\n"
15981+ LOCK_PREFIX "incl %0\n"
15982+ "int $4\n0:\n"
15983+ _ASM_EXTABLE(0b, 0b)
15984+#endif
15985+
15986+ : "+m" (v->counter));
15987+}
15988+
15989+/**
15990+ * atomic_dec_unchecked - decrement atomic variable
15991+ * @v: pointer of type atomic_unchecked_t
15992+ *
15993+ * Atomically decrements @v by 1.
15994+ */
15995+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15996+{
15997+ asm volatile(LOCK_PREFIX "decl %0\n"
15998 : "+m" (v->counter));
15999 }
16000
16001@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
16002 */
16003 static inline int atomic_dec_and_test(atomic_t *v)
16004 {
16005- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
16006+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
16007 }
16008
16009 /**
16010@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
16011 */
16012 static inline int atomic_inc_and_test(atomic_t *v)
16013 {
16014- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
16015+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
16016+}
16017+
16018+/**
16019+ * atomic_inc_and_test_unchecked - increment and test
16020+ * @v: pointer of type atomic_unchecked_t
16021+ *
16022+ * Atomically increments @v by 1
16023+ * and returns true if the result is zero, or false for all
16024+ * other cases.
16025+ */
16026+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
16027+{
16028+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
16029 }
16030
16031 /**
16032@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
16033 */
16034 static inline int atomic_add_negative(int i, atomic_t *v)
16035 {
16036- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
16037+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
16038 }
16039
16040 /**
16041@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
16042 */
16043 static inline int atomic_add_return(int i, atomic_t *v)
16044 {
16045+ return i + xadd_check_overflow(&v->counter, i);
16046+}
16047+
16048+/**
16049+ * atomic_add_return_unchecked - add integer and return
16050+ * @i: integer value to add
16051+ * @v: pointer of type atomic_unchecked_t
16052+ *
16053+ * Atomically adds @i to @v and returns @i + @v
16054+ */
16055+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
16056+{
16057 return i + xadd(&v->counter, i);
16058 }
16059
16060@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
16061 }
16062
16063 #define atomic_inc_return(v) (atomic_add_return(1, v))
16064+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
16065+{
16066+ return atomic_add_return_unchecked(1, v);
16067+}
16068 #define atomic_dec_return(v) (atomic_sub_return(1, v))
16069
16070-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
16071+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
16072+{
16073+ return cmpxchg(&v->counter, old, new);
16074+}
16075+
16076+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
16077 {
16078 return cmpxchg(&v->counter, old, new);
16079 }
16080@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
16081 return xchg(&v->counter, new);
16082 }
16083
16084+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
16085+{
16086+ return xchg(&v->counter, new);
16087+}
16088+
16089 /**
16090 * __atomic_add_unless - add unless the number is already a given value
16091 * @v: pointer of type atomic_t
16092@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
16093 * Atomically adds @a to @v, so long as @v was not already @u.
16094 * Returns the old value of @v.
16095 */
16096-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16097+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
16098 {
16099- int c, old;
16100+ int c, old, new;
16101 c = atomic_read(v);
16102 for (;;) {
16103- if (unlikely(c == (u)))
16104+ if (unlikely(c == u))
16105 break;
16106- old = atomic_cmpxchg((v), c, c + (a));
16107+
16108+ asm volatile("addl %2,%0\n"
16109+
16110+#ifdef CONFIG_PAX_REFCOUNT
16111+ "jno 0f\n"
16112+ "subl %2,%0\n"
16113+ "int $4\n0:\n"
16114+ _ASM_EXTABLE(0b, 0b)
16115+#endif
16116+
16117+ : "=r" (new)
16118+ : "0" (c), "ir" (a));
16119+
16120+ old = atomic_cmpxchg(v, c, new);
16121 if (likely(old == c))
16122 break;
16123 c = old;
16124@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16125 }
16126
16127 /**
16128+ * atomic_inc_not_zero_hint - increment if not null
16129+ * @v: pointer of type atomic_t
16130+ * @hint: probable value of the atomic before the increment
16131+ *
16132+ * This version of atomic_inc_not_zero() gives a hint of probable
16133+ * value of the atomic. This helps processor to not read the memory
16134+ * before doing the atomic read/modify/write cycle, lowering
16135+ * number of bus transactions on some arches.
16136+ *
16137+ * Returns: 0 if increment was not done, 1 otherwise.
16138+ */
16139+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
16140+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
16141+{
16142+ int val, c = hint, new;
16143+
16144+ /* sanity test, should be removed by compiler if hint is a constant */
16145+ if (!hint)
16146+ return __atomic_add_unless(v, 1, 0);
16147+
16148+ do {
16149+ asm volatile("incl %0\n"
16150+
16151+#ifdef CONFIG_PAX_REFCOUNT
16152+ "jno 0f\n"
16153+ "decl %0\n"
16154+ "int $4\n0:\n"
16155+ _ASM_EXTABLE(0b, 0b)
16156+#endif
16157+
16158+ : "=r" (new)
16159+ : "0" (c));
16160+
16161+ val = atomic_cmpxchg(v, c, new);
16162+ if (val == c)
16163+ return 1;
16164+ c = val;
16165+ } while (c);
16166+
16167+ return 0;
16168+}
16169+
16170+/**
16171 * atomic_inc_short - increment of a short integer
16172 * @v: pointer to type int
16173 *
16174@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
16175 #endif
16176
16177 /* These are x86-specific, used by some header files */
16178-#define atomic_clear_mask(mask, addr) \
16179- asm volatile(LOCK_PREFIX "andl %0,%1" \
16180- : : "r" (~(mask)), "m" (*(addr)) : "memory")
16181+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
16182+{
16183+ asm volatile(LOCK_PREFIX "andl %1,%0"
16184+ : "+m" (v->counter)
16185+ : "r" (~(mask))
16186+ : "memory");
16187+}
16188
16189-#define atomic_set_mask(mask, addr) \
16190- asm volatile(LOCK_PREFIX "orl %0,%1" \
16191- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
16192- : "memory")
16193+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16194+{
16195+ asm volatile(LOCK_PREFIX "andl %1,%0"
16196+ : "+m" (v->counter)
16197+ : "r" (~(mask))
16198+ : "memory");
16199+}
16200+
16201+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
16202+{
16203+ asm volatile(LOCK_PREFIX "orl %1,%0"
16204+ : "+m" (v->counter)
16205+ : "r" (mask)
16206+ : "memory");
16207+}
16208+
16209+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16210+{
16211+ asm volatile(LOCK_PREFIX "orl %1,%0"
16212+ : "+m" (v->counter)
16213+ : "r" (mask)
16214+ : "memory");
16215+}
16216
16217 #ifdef CONFIG_X86_32
16218 # include <asm/atomic64_32.h>
16219diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
16220index b154de7..bf18a5a 100644
16221--- a/arch/x86/include/asm/atomic64_32.h
16222+++ b/arch/x86/include/asm/atomic64_32.h
16223@@ -12,6 +12,14 @@ typedef struct {
16224 u64 __aligned(8) counter;
16225 } atomic64_t;
16226
16227+#ifdef CONFIG_PAX_REFCOUNT
16228+typedef struct {
16229+ u64 __aligned(8) counter;
16230+} atomic64_unchecked_t;
16231+#else
16232+typedef atomic64_t atomic64_unchecked_t;
16233+#endif
16234+
16235 #define ATOMIC64_INIT(val) { (val) }
16236
16237 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
16238@@ -37,21 +45,31 @@ typedef struct {
16239 ATOMIC64_DECL_ONE(sym##_386)
16240
16241 ATOMIC64_DECL_ONE(add_386);
16242+ATOMIC64_DECL_ONE(add_unchecked_386);
16243 ATOMIC64_DECL_ONE(sub_386);
16244+ATOMIC64_DECL_ONE(sub_unchecked_386);
16245 ATOMIC64_DECL_ONE(inc_386);
16246+ATOMIC64_DECL_ONE(inc_unchecked_386);
16247 ATOMIC64_DECL_ONE(dec_386);
16248+ATOMIC64_DECL_ONE(dec_unchecked_386);
16249 #endif
16250
16251 #define alternative_atomic64(f, out, in...) \
16252 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
16253
16254 ATOMIC64_DECL(read);
16255+ATOMIC64_DECL(read_unchecked);
16256 ATOMIC64_DECL(set);
16257+ATOMIC64_DECL(set_unchecked);
16258 ATOMIC64_DECL(xchg);
16259 ATOMIC64_DECL(add_return);
16260+ATOMIC64_DECL(add_return_unchecked);
16261 ATOMIC64_DECL(sub_return);
16262+ATOMIC64_DECL(sub_return_unchecked);
16263 ATOMIC64_DECL(inc_return);
16264+ATOMIC64_DECL(inc_return_unchecked);
16265 ATOMIC64_DECL(dec_return);
16266+ATOMIC64_DECL(dec_return_unchecked);
16267 ATOMIC64_DECL(dec_if_positive);
16268 ATOMIC64_DECL(inc_not_zero);
16269 ATOMIC64_DECL(add_unless);
16270@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
16271 }
16272
16273 /**
16274+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
16275+ * @p: pointer to type atomic64_unchecked_t
16276+ * @o: expected value
16277+ * @n: new value
16278+ *
16279+ * Atomically sets @v to @n if it was equal to @o and returns
16280+ * the old value.
16281+ */
16282+
16283+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
16284+{
16285+ return cmpxchg64(&v->counter, o, n);
16286+}
16287+
16288+/**
16289 * atomic64_xchg - xchg atomic64 variable
16290 * @v: pointer to type atomic64_t
16291 * @n: value to assign
16292@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
16293 }
16294
16295 /**
16296+ * atomic64_set_unchecked - set atomic64 variable
16297+ * @v: pointer to type atomic64_unchecked_t
16298+ * @n: value to assign
16299+ *
16300+ * Atomically sets the value of @v to @n.
16301+ */
16302+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
16303+{
16304+ unsigned high = (unsigned)(i >> 32);
16305+ unsigned low = (unsigned)i;
16306+ alternative_atomic64(set, /* no output */,
16307+ "S" (v), "b" (low), "c" (high)
16308+ : "eax", "edx", "memory");
16309+}
16310+
16311+/**
16312 * atomic64_read - read atomic64 variable
16313 * @v: pointer to type atomic64_t
16314 *
16315@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
16316 }
16317
16318 /**
16319+ * atomic64_read_unchecked - read atomic64 variable
16320+ * @v: pointer to type atomic64_unchecked_t
16321+ *
16322+ * Atomically reads the value of @v and returns it.
16323+ */
16324+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
16325+{
16326+ long long r;
16327+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
16328+ return r;
16329+ }
16330+
16331+/**
16332 * atomic64_add_return - add and return
16333 * @i: integer value to add
16334 * @v: pointer to type atomic64_t
16335@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
16336 return i;
16337 }
16338
16339+/**
16340+ * atomic64_add_return_unchecked - add and return
16341+ * @i: integer value to add
16342+ * @v: pointer to type atomic64_unchecked_t
16343+ *
16344+ * Atomically adds @i to @v and returns @i + *@v
16345+ */
16346+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
16347+{
16348+ alternative_atomic64(add_return_unchecked,
16349+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16350+ ASM_NO_INPUT_CLOBBER("memory"));
16351+ return i;
16352+}
16353+
16354 /*
16355 * Other variants with different arithmetic operators:
16356 */
16357@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
16358 return a;
16359 }
16360
16361+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16362+{
16363+ long long a;
16364+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
16365+ "S" (v) : "memory", "ecx");
16366+ return a;
16367+}
16368+
16369 static inline long long atomic64_dec_return(atomic64_t *v)
16370 {
16371 long long a;
16372@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
16373 }
16374
16375 /**
16376+ * atomic64_add_unchecked - add integer to atomic64 variable
16377+ * @i: integer value to add
16378+ * @v: pointer to type atomic64_unchecked_t
16379+ *
16380+ * Atomically adds @i to @v.
16381+ */
16382+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
16383+{
16384+ __alternative_atomic64(add_unchecked, add_return_unchecked,
16385+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16386+ ASM_NO_INPUT_CLOBBER("memory"));
16387+ return i;
16388+}
16389+
16390+/**
16391 * atomic64_sub - subtract the atomic64 variable
16392 * @i: integer value to subtract
16393 * @v: pointer to type atomic64_t
16394diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
16395index 46e9052..ae45136 100644
16396--- a/arch/x86/include/asm/atomic64_64.h
16397+++ b/arch/x86/include/asm/atomic64_64.h
16398@@ -18,7 +18,19 @@
16399 */
16400 static inline long atomic64_read(const atomic64_t *v)
16401 {
16402- return (*(volatile long *)&(v)->counter);
16403+ return (*(volatile const long *)&(v)->counter);
16404+}
16405+
16406+/**
16407+ * atomic64_read_unchecked - read atomic64 variable
16408+ * @v: pointer of type atomic64_unchecked_t
16409+ *
16410+ * Atomically reads the value of @v.
16411+ * Doesn't imply a read memory barrier.
16412+ */
16413+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
16414+{
16415+ return (*(volatile const long *)&(v)->counter);
16416 }
16417
16418 /**
16419@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
16420 }
16421
16422 /**
16423+ * atomic64_set_unchecked - set atomic64 variable
16424+ * @v: pointer to type atomic64_unchecked_t
16425+ * @i: required value
16426+ *
16427+ * Atomically sets the value of @v to @i.
16428+ */
16429+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
16430+{
16431+ v->counter = i;
16432+}
16433+
16434+/**
16435 * atomic64_add - add integer to atomic64 variable
16436 * @i: integer value to add
16437 * @v: pointer to type atomic64_t
16438@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
16439 */
16440 static inline void atomic64_add(long i, atomic64_t *v)
16441 {
16442+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
16443+
16444+#ifdef CONFIG_PAX_REFCOUNT
16445+ "jno 0f\n"
16446+ LOCK_PREFIX "subq %1,%0\n"
16447+ "int $4\n0:\n"
16448+ _ASM_EXTABLE(0b, 0b)
16449+#endif
16450+
16451+ : "=m" (v->counter)
16452+ : "er" (i), "m" (v->counter));
16453+}
16454+
16455+/**
16456+ * atomic64_add_unchecked - add integer to atomic64 variable
16457+ * @i: integer value to add
16458+ * @v: pointer to type atomic64_unchecked_t
16459+ *
16460+ * Atomically adds @i to @v.
16461+ */
16462+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16463+{
16464 asm volatile(LOCK_PREFIX "addq %1,%0"
16465 : "=m" (v->counter)
16466 : "er" (i), "m" (v->counter));
16467@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16468 */
16469 static inline void atomic64_sub(long i, atomic64_t *v)
16470 {
16471- asm volatile(LOCK_PREFIX "subq %1,%0"
16472+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16473+
16474+#ifdef CONFIG_PAX_REFCOUNT
16475+ "jno 0f\n"
16476+ LOCK_PREFIX "addq %1,%0\n"
16477+ "int $4\n0:\n"
16478+ _ASM_EXTABLE(0b, 0b)
16479+#endif
16480+
16481+ : "=m" (v->counter)
16482+ : "er" (i), "m" (v->counter));
16483+}
16484+
16485+/**
16486+ * atomic64_sub_unchecked - subtract the atomic64 variable
16487+ * @i: integer value to subtract
16488+ * @v: pointer to type atomic64_unchecked_t
16489+ *
16490+ * Atomically subtracts @i from @v.
16491+ */
16492+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16493+{
16494+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16495 : "=m" (v->counter)
16496 : "er" (i), "m" (v->counter));
16497 }
16498@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16499 */
16500 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16501 {
16502- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16503+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16504 }
16505
16506 /**
16507@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16508 */
16509 static inline void atomic64_inc(atomic64_t *v)
16510 {
16511+ asm volatile(LOCK_PREFIX "incq %0\n"
16512+
16513+#ifdef CONFIG_PAX_REFCOUNT
16514+ "jno 0f\n"
16515+ LOCK_PREFIX "decq %0\n"
16516+ "int $4\n0:\n"
16517+ _ASM_EXTABLE(0b, 0b)
16518+#endif
16519+
16520+ : "=m" (v->counter)
16521+ : "m" (v->counter));
16522+}
16523+
16524+/**
16525+ * atomic64_inc_unchecked - increment atomic64 variable
16526+ * @v: pointer to type atomic64_unchecked_t
16527+ *
16528+ * Atomically increments @v by 1.
16529+ */
16530+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16531+{
16532 asm volatile(LOCK_PREFIX "incq %0"
16533 : "=m" (v->counter)
16534 : "m" (v->counter));
16535@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16536 */
16537 static inline void atomic64_dec(atomic64_t *v)
16538 {
16539- asm volatile(LOCK_PREFIX "decq %0"
16540+ asm volatile(LOCK_PREFIX "decq %0\n"
16541+
16542+#ifdef CONFIG_PAX_REFCOUNT
16543+ "jno 0f\n"
16544+ LOCK_PREFIX "incq %0\n"
16545+ "int $4\n0:\n"
16546+ _ASM_EXTABLE(0b, 0b)
16547+#endif
16548+
16549+ : "=m" (v->counter)
16550+ : "m" (v->counter));
16551+}
16552+
16553+/**
16554+ * atomic64_dec_unchecked - decrement atomic64 variable
16555+ * @v: pointer to type atomic64_t
16556+ *
16557+ * Atomically decrements @v by 1.
16558+ */
16559+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16560+{
16561+ asm volatile(LOCK_PREFIX "decq %0\n"
16562 : "=m" (v->counter)
16563 : "m" (v->counter));
16564 }
16565@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16566 */
16567 static inline int atomic64_dec_and_test(atomic64_t *v)
16568 {
16569- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16570+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16571 }
16572
16573 /**
16574@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16575 */
16576 static inline int atomic64_inc_and_test(atomic64_t *v)
16577 {
16578- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16579+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16580 }
16581
16582 /**
16583@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16584 */
16585 static inline int atomic64_add_negative(long i, atomic64_t *v)
16586 {
16587- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16588+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16589 }
16590
16591 /**
16592@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16593 */
16594 static inline long atomic64_add_return(long i, atomic64_t *v)
16595 {
16596+ return i + xadd_check_overflow(&v->counter, i);
16597+}
16598+
16599+/**
16600+ * atomic64_add_return_unchecked - add and return
16601+ * @i: integer value to add
16602+ * @v: pointer to type atomic64_unchecked_t
16603+ *
16604+ * Atomically adds @i to @v and returns @i + @v
16605+ */
16606+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16607+{
16608 return i + xadd(&v->counter, i);
16609 }
16610
16611@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16612 }
16613
16614 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16615+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16616+{
16617+ return atomic64_add_return_unchecked(1, v);
16618+}
16619 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16620
16621 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16622@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16623 return cmpxchg(&v->counter, old, new);
16624 }
16625
16626+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16627+{
16628+ return cmpxchg(&v->counter, old, new);
16629+}
16630+
16631 static inline long atomic64_xchg(atomic64_t *v, long new)
16632 {
16633 return xchg(&v->counter, new);
16634@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16635 */
16636 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16637 {
16638- long c, old;
16639+ long c, old, new;
16640 c = atomic64_read(v);
16641 for (;;) {
16642- if (unlikely(c == (u)))
16643+ if (unlikely(c == u))
16644 break;
16645- old = atomic64_cmpxchg((v), c, c + (a));
16646+
16647+ asm volatile("add %2,%0\n"
16648+
16649+#ifdef CONFIG_PAX_REFCOUNT
16650+ "jno 0f\n"
16651+ "sub %2,%0\n"
16652+ "int $4\n0:\n"
16653+ _ASM_EXTABLE(0b, 0b)
16654+#endif
16655+
16656+ : "=r" (new)
16657+ : "0" (c), "ir" (a));
16658+
16659+ old = atomic64_cmpxchg(v, c, new);
16660 if (likely(old == c))
16661 break;
16662 c = old;
16663 }
16664- return c != (u);
16665+ return c != u;
16666 }
16667
16668 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16669diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16670index 0f4460b..fa1ee19 100644
16671--- a/arch/x86/include/asm/barrier.h
16672+++ b/arch/x86/include/asm/barrier.h
16673@@ -107,7 +107,7 @@
16674 do { \
16675 compiletime_assert_atomic_type(*p); \
16676 smp_mb(); \
16677- ACCESS_ONCE(*p) = (v); \
16678+ ACCESS_ONCE_RW(*p) = (v); \
16679 } while (0)
16680
16681 #define smp_load_acquire(p) \
16682@@ -124,7 +124,7 @@ do { \
16683 do { \
16684 compiletime_assert_atomic_type(*p); \
16685 barrier(); \
16686- ACCESS_ONCE(*p) = (v); \
16687+ ACCESS_ONCE_RW(*p) = (v); \
16688 } while (0)
16689
16690 #define smp_load_acquire(p) \
16691diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16692index cfe3b95..d01b118 100644
16693--- a/arch/x86/include/asm/bitops.h
16694+++ b/arch/x86/include/asm/bitops.h
16695@@ -50,7 +50,7 @@
16696 * a mask operation on a byte.
16697 */
16698 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16699-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16700+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16701 #define CONST_MASK(nr) (1 << ((nr) & 7))
16702
16703 /**
16704@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16705 */
16706 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16707 {
16708- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16709+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16710 }
16711
16712 /**
16713@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16714 */
16715 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16716 {
16717- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16718+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16719 }
16720
16721 /**
16722@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16723 */
16724 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16725 {
16726- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16727+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16728 }
16729
16730 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16731@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16732 *
16733 * Undefined if no bit exists, so code should check against 0 first.
16734 */
16735-static inline unsigned long __ffs(unsigned long word)
16736+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16737 {
16738 asm("rep; bsf %1,%0"
16739 : "=r" (word)
16740@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16741 *
16742 * Undefined if no zero exists, so code should check against ~0UL first.
16743 */
16744-static inline unsigned long ffz(unsigned long word)
16745+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16746 {
16747 asm("rep; bsf %1,%0"
16748 : "=r" (word)
16749@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16750 *
16751 * Undefined if no set bit exists, so code should check against 0 first.
16752 */
16753-static inline unsigned long __fls(unsigned long word)
16754+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16755 {
16756 asm("bsr %1,%0"
16757 : "=r" (word)
16758@@ -434,7 +434,7 @@ static inline int ffs(int x)
16759 * set bit if value is nonzero. The last (most significant) bit is
16760 * at position 32.
16761 */
16762-static inline int fls(int x)
16763+static inline int __intentional_overflow(-1) fls(int x)
16764 {
16765 int r;
16766
16767@@ -476,7 +476,7 @@ static inline int fls(int x)
16768 * at position 64.
16769 */
16770 #ifdef CONFIG_X86_64
16771-static __always_inline int fls64(__u64 x)
16772+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16773 {
16774 int bitpos = -1;
16775 /*
16776diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16777index 4fa687a..60f2d39 100644
16778--- a/arch/x86/include/asm/boot.h
16779+++ b/arch/x86/include/asm/boot.h
16780@@ -6,10 +6,15 @@
16781 #include <uapi/asm/boot.h>
16782
16783 /* Physical address where kernel should be loaded. */
16784-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16785+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16786 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16787 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16788
16789+#ifndef __ASSEMBLY__
16790+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16791+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16792+#endif
16793+
16794 /* Minimum kernel alignment, as a power of two */
16795 #ifdef CONFIG_X86_64
16796 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16797diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16798index 48f99f1..d78ebf9 100644
16799--- a/arch/x86/include/asm/cache.h
16800+++ b/arch/x86/include/asm/cache.h
16801@@ -5,12 +5,13 @@
16802
16803 /* L1 cache line size */
16804 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16805-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16806+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16807
16808 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16809+#define __read_only __attribute__((__section__(".data..read_only")))
16810
16811 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16812-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16813+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16814
16815 #ifdef CONFIG_X86_VSMP
16816 #ifdef CONFIG_SMP
16817diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16818index 9863ee3..4a1f8e1 100644
16819--- a/arch/x86/include/asm/cacheflush.h
16820+++ b/arch/x86/include/asm/cacheflush.h
16821@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16822 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16823
16824 if (pg_flags == _PGMT_DEFAULT)
16825- return -1;
16826+ return ~0UL;
16827 else if (pg_flags == _PGMT_WC)
16828 return _PAGE_CACHE_WC;
16829 else if (pg_flags == _PGMT_UC_MINUS)
16830diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16831index cb4c73b..c473c29 100644
16832--- a/arch/x86/include/asm/calling.h
16833+++ b/arch/x86/include/asm/calling.h
16834@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16835 #define RSP 152
16836 #define SS 160
16837
16838-#define ARGOFFSET R11
16839-#define SWFRAME ORIG_RAX
16840+#define ARGOFFSET R15
16841
16842 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16843- subq $9*8+\addskip, %rsp
16844- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16845- movq_cfi rdi, 8*8
16846- movq_cfi rsi, 7*8
16847- movq_cfi rdx, 6*8
16848+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16849+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16850+ movq_cfi rdi, RDI
16851+ movq_cfi rsi, RSI
16852+ movq_cfi rdx, RDX
16853
16854 .if \save_rcx
16855- movq_cfi rcx, 5*8
16856+ movq_cfi rcx, RCX
16857 .endif
16858
16859- movq_cfi rax, 4*8
16860+ movq_cfi rax, RAX
16861
16862 .if \save_r891011
16863- movq_cfi r8, 3*8
16864- movq_cfi r9, 2*8
16865- movq_cfi r10, 1*8
16866- movq_cfi r11, 0*8
16867+ movq_cfi r8, R8
16868+ movq_cfi r9, R9
16869+ movq_cfi r10, R10
16870+ movq_cfi r11, R11
16871 .endif
16872
16873+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16874+ movq_cfi r12, R12
16875+#endif
16876+
16877 .endm
16878
16879-#define ARG_SKIP (9*8)
16880+#define ARG_SKIP ORIG_RAX
16881
16882 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16883 rstor_r8910=1, rstor_rdx=1
16884+
16885+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16886+ movq_cfi_restore R12, r12
16887+#endif
16888+
16889 .if \rstor_r11
16890- movq_cfi_restore 0*8, r11
16891+ movq_cfi_restore R11, r11
16892 .endif
16893
16894 .if \rstor_r8910
16895- movq_cfi_restore 1*8, r10
16896- movq_cfi_restore 2*8, r9
16897- movq_cfi_restore 3*8, r8
16898+ movq_cfi_restore R10, r10
16899+ movq_cfi_restore R9, r9
16900+ movq_cfi_restore R8, r8
16901 .endif
16902
16903 .if \rstor_rax
16904- movq_cfi_restore 4*8, rax
16905+ movq_cfi_restore RAX, rax
16906 .endif
16907
16908 .if \rstor_rcx
16909- movq_cfi_restore 5*8, rcx
16910+ movq_cfi_restore RCX, rcx
16911 .endif
16912
16913 .if \rstor_rdx
16914- movq_cfi_restore 6*8, rdx
16915+ movq_cfi_restore RDX, rdx
16916 .endif
16917
16918- movq_cfi_restore 7*8, rsi
16919- movq_cfi_restore 8*8, rdi
16920+ movq_cfi_restore RSI, rsi
16921+ movq_cfi_restore RDI, rdi
16922
16923- .if ARG_SKIP+\addskip > 0
16924- addq $ARG_SKIP+\addskip, %rsp
16925- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16926+ .if ORIG_RAX+\addskip > 0
16927+ addq $ORIG_RAX+\addskip, %rsp
16928+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16929 .endif
16930 .endm
16931
16932- .macro LOAD_ARGS offset, skiprax=0
16933- movq \offset(%rsp), %r11
16934- movq \offset+8(%rsp), %r10
16935- movq \offset+16(%rsp), %r9
16936- movq \offset+24(%rsp), %r8
16937- movq \offset+40(%rsp), %rcx
16938- movq \offset+48(%rsp), %rdx
16939- movq \offset+56(%rsp), %rsi
16940- movq \offset+64(%rsp), %rdi
16941+ .macro LOAD_ARGS skiprax=0
16942+ movq R11(%rsp), %r11
16943+ movq R10(%rsp), %r10
16944+ movq R9(%rsp), %r9
16945+ movq R8(%rsp), %r8
16946+ movq RCX(%rsp), %rcx
16947+ movq RDX(%rsp), %rdx
16948+ movq RSI(%rsp), %rsi
16949+ movq RDI(%rsp), %rdi
16950 .if \skiprax
16951 .else
16952- movq \offset+72(%rsp), %rax
16953+ movq RAX(%rsp), %rax
16954 .endif
16955 .endm
16956
16957-#define REST_SKIP (6*8)
16958-
16959 .macro SAVE_REST
16960- subq $REST_SKIP, %rsp
16961- CFI_ADJUST_CFA_OFFSET REST_SKIP
16962- movq_cfi rbx, 5*8
16963- movq_cfi rbp, 4*8
16964- movq_cfi r12, 3*8
16965- movq_cfi r13, 2*8
16966- movq_cfi r14, 1*8
16967- movq_cfi r15, 0*8
16968+ movq_cfi rbx, RBX
16969+ movq_cfi rbp, RBP
16970+
16971+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16972+ movq_cfi r12, R12
16973+#endif
16974+
16975+ movq_cfi r13, R13
16976+ movq_cfi r14, R14
16977+ movq_cfi r15, R15
16978 .endm
16979
16980 .macro RESTORE_REST
16981- movq_cfi_restore 0*8, r15
16982- movq_cfi_restore 1*8, r14
16983- movq_cfi_restore 2*8, r13
16984- movq_cfi_restore 3*8, r12
16985- movq_cfi_restore 4*8, rbp
16986- movq_cfi_restore 5*8, rbx
16987- addq $REST_SKIP, %rsp
16988- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16989+ movq_cfi_restore R15, r15
16990+ movq_cfi_restore R14, r14
16991+ movq_cfi_restore R13, r13
16992+
16993+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16994+ movq_cfi_restore R12, r12
16995+#endif
16996+
16997+ movq_cfi_restore RBP, rbp
16998+ movq_cfi_restore RBX, rbx
16999 .endm
17000
17001 .macro SAVE_ALL
17002diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
17003index f50de69..2b0a458 100644
17004--- a/arch/x86/include/asm/checksum_32.h
17005+++ b/arch/x86/include/asm/checksum_32.h
17006@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
17007 int len, __wsum sum,
17008 int *src_err_ptr, int *dst_err_ptr);
17009
17010+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
17011+ int len, __wsum sum,
17012+ int *src_err_ptr, int *dst_err_ptr);
17013+
17014+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
17015+ int len, __wsum sum,
17016+ int *src_err_ptr, int *dst_err_ptr);
17017+
17018 /*
17019 * Note: when you get a NULL pointer exception here this means someone
17020 * passed in an incorrect kernel address to one of these functions.
17021@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
17022
17023 might_sleep();
17024 stac();
17025- ret = csum_partial_copy_generic((__force void *)src, dst,
17026+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
17027 len, sum, err_ptr, NULL);
17028 clac();
17029
17030@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
17031 might_sleep();
17032 if (access_ok(VERIFY_WRITE, dst, len)) {
17033 stac();
17034- ret = csum_partial_copy_generic(src, (__force void *)dst,
17035+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
17036 len, sum, NULL, err_ptr);
17037 clac();
17038 return ret;
17039diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
17040index 99c105d7..2f667ac 100644
17041--- a/arch/x86/include/asm/cmpxchg.h
17042+++ b/arch/x86/include/asm/cmpxchg.h
17043@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
17044 __compiletime_error("Bad argument size for cmpxchg");
17045 extern void __xadd_wrong_size(void)
17046 __compiletime_error("Bad argument size for xadd");
17047+extern void __xadd_check_overflow_wrong_size(void)
17048+ __compiletime_error("Bad argument size for xadd_check_overflow");
17049 extern void __add_wrong_size(void)
17050 __compiletime_error("Bad argument size for add");
17051+extern void __add_check_overflow_wrong_size(void)
17052+ __compiletime_error("Bad argument size for add_check_overflow");
17053
17054 /*
17055 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
17056@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
17057 __ret; \
17058 })
17059
17060+#ifdef CONFIG_PAX_REFCOUNT
17061+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
17062+ ({ \
17063+ __typeof__ (*(ptr)) __ret = (arg); \
17064+ switch (sizeof(*(ptr))) { \
17065+ case __X86_CASE_L: \
17066+ asm volatile (lock #op "l %0, %1\n" \
17067+ "jno 0f\n" \
17068+ "mov %0,%1\n" \
17069+ "int $4\n0:\n" \
17070+ _ASM_EXTABLE(0b, 0b) \
17071+ : "+r" (__ret), "+m" (*(ptr)) \
17072+ : : "memory", "cc"); \
17073+ break; \
17074+ case __X86_CASE_Q: \
17075+ asm volatile (lock #op "q %q0, %1\n" \
17076+ "jno 0f\n" \
17077+ "mov %0,%1\n" \
17078+ "int $4\n0:\n" \
17079+ _ASM_EXTABLE(0b, 0b) \
17080+ : "+r" (__ret), "+m" (*(ptr)) \
17081+ : : "memory", "cc"); \
17082+ break; \
17083+ default: \
17084+ __ ## op ## _check_overflow_wrong_size(); \
17085+ } \
17086+ __ret; \
17087+ })
17088+#else
17089+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
17090+#endif
17091+
17092 /*
17093 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
17094 * Since this is generally used to protect other memory information, we
17095@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
17096 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
17097 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
17098
17099+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
17100+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
17101+
17102 #define __add(ptr, inc, lock) \
17103 ({ \
17104 __typeof__ (*(ptr)) __ret = (inc); \
17105diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
17106index 59c6c40..5e0b22c 100644
17107--- a/arch/x86/include/asm/compat.h
17108+++ b/arch/x86/include/asm/compat.h
17109@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
17110 typedef u32 compat_uint_t;
17111 typedef u32 compat_ulong_t;
17112 typedef u64 __attribute__((aligned(4))) compat_u64;
17113-typedef u32 compat_uptr_t;
17114+typedef u32 __user compat_uptr_t;
17115
17116 struct compat_timespec {
17117 compat_time_t tv_sec;
17118diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
17119index bb9b258..5fad1bf 100644
17120--- a/arch/x86/include/asm/cpufeature.h
17121+++ b/arch/x86/include/asm/cpufeature.h
17122@@ -203,14 +203,14 @@
17123 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
17124 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
17125
17126-
17127+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
17128 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
17129 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
17130 #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
17131 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17132 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17133 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17134-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17135+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
17136 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17137 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
17138 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17139@@ -370,6 +370,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
17140 #undef cpu_has_centaur_mcr
17141 #define cpu_has_centaur_mcr 0
17142
17143+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
17144 #endif /* CONFIG_X86_64 */
17145
17146 #if __GNUC__ >= 4
17147@@ -422,7 +423,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17148
17149 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
17150 t_warn:
17151- warn_pre_alternatives();
17152+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
17153+ warn_pre_alternatives();
17154 return false;
17155 #endif
17156
17157@@ -442,7 +444,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17158 ".section .discard,\"aw\",@progbits\n"
17159 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17160 ".previous\n"
17161- ".section .altinstr_replacement,\"ax\"\n"
17162+ ".section .altinstr_replacement,\"a\"\n"
17163 "3: movb $1,%0\n"
17164 "4:\n"
17165 ".previous\n"
17166@@ -479,7 +481,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17167 " .byte 2b - 1b\n" /* src len */
17168 " .byte 4f - 3f\n" /* repl len */
17169 ".previous\n"
17170- ".section .altinstr_replacement,\"ax\"\n"
17171+ ".section .altinstr_replacement,\"a\"\n"
17172 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
17173 "4:\n"
17174 ".previous\n"
17175@@ -512,7 +514,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17176 ".section .discard,\"aw\",@progbits\n"
17177 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17178 ".previous\n"
17179- ".section .altinstr_replacement,\"ax\"\n"
17180+ ".section .altinstr_replacement,\"a\"\n"
17181 "3: movb $0,%0\n"
17182 "4:\n"
17183 ".previous\n"
17184@@ -526,7 +528,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17185 ".section .discard,\"aw\",@progbits\n"
17186 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
17187 ".previous\n"
17188- ".section .altinstr_replacement,\"ax\"\n"
17189+ ".section .altinstr_replacement,\"a\"\n"
17190 "5: movb $1,%0\n"
17191 "6:\n"
17192 ".previous\n"
17193diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
17194index 50d033a..37deb26 100644
17195--- a/arch/x86/include/asm/desc.h
17196+++ b/arch/x86/include/asm/desc.h
17197@@ -4,6 +4,7 @@
17198 #include <asm/desc_defs.h>
17199 #include <asm/ldt.h>
17200 #include <asm/mmu.h>
17201+#include <asm/pgtable.h>
17202
17203 #include <linux/smp.h>
17204 #include <linux/percpu.h>
17205@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17206
17207 desc->type = (info->read_exec_only ^ 1) << 1;
17208 desc->type |= info->contents << 2;
17209+ desc->type |= info->seg_not_present ^ 1;
17210
17211 desc->s = 1;
17212 desc->dpl = 0x3;
17213@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17214 }
17215
17216 extern struct desc_ptr idt_descr;
17217-extern gate_desc idt_table[];
17218-extern struct desc_ptr debug_idt_descr;
17219-extern gate_desc debug_idt_table[];
17220-
17221-struct gdt_page {
17222- struct desc_struct gdt[GDT_ENTRIES];
17223-} __attribute__((aligned(PAGE_SIZE)));
17224-
17225-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
17226+extern gate_desc idt_table[IDT_ENTRIES];
17227+extern const struct desc_ptr debug_idt_descr;
17228+extern gate_desc debug_idt_table[IDT_ENTRIES];
17229
17230+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
17231 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
17232 {
17233- return per_cpu(gdt_page, cpu).gdt;
17234+ return cpu_gdt_table[cpu];
17235 }
17236
17237 #ifdef CONFIG_X86_64
17238@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
17239 unsigned long base, unsigned dpl, unsigned flags,
17240 unsigned short seg)
17241 {
17242- gate->a = (seg << 16) | (base & 0xffff);
17243- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
17244+ gate->gate.offset_low = base;
17245+ gate->gate.seg = seg;
17246+ gate->gate.reserved = 0;
17247+ gate->gate.type = type;
17248+ gate->gate.s = 0;
17249+ gate->gate.dpl = dpl;
17250+ gate->gate.p = 1;
17251+ gate->gate.offset_high = base >> 16;
17252 }
17253
17254 #endif
17255@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
17256
17257 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
17258 {
17259+ pax_open_kernel();
17260 memcpy(&idt[entry], gate, sizeof(*gate));
17261+ pax_close_kernel();
17262 }
17263
17264 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
17265 {
17266+ pax_open_kernel();
17267 memcpy(&ldt[entry], desc, 8);
17268+ pax_close_kernel();
17269 }
17270
17271 static inline void
17272@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
17273 default: size = sizeof(*gdt); break;
17274 }
17275
17276+ pax_open_kernel();
17277 memcpy(&gdt[entry], desc, size);
17278+ pax_close_kernel();
17279 }
17280
17281 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
17282@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
17283
17284 static inline void native_load_tr_desc(void)
17285 {
17286+ pax_open_kernel();
17287 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
17288+ pax_close_kernel();
17289 }
17290
17291 static inline void native_load_gdt(const struct desc_ptr *dtr)
17292@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
17293 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
17294 unsigned int i;
17295
17296+ pax_open_kernel();
17297 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
17298 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
17299+ pax_close_kernel();
17300 }
17301
17302 #define _LDT_empty(info) \
17303@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
17304 preempt_enable();
17305 }
17306
17307-static inline unsigned long get_desc_base(const struct desc_struct *desc)
17308+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
17309 {
17310 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
17311 }
17312@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
17313 }
17314
17315 #ifdef CONFIG_X86_64
17316-static inline void set_nmi_gate(int gate, void *addr)
17317+static inline void set_nmi_gate(int gate, const void *addr)
17318 {
17319 gate_desc s;
17320
17321@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
17322 #endif
17323
17324 #ifdef CONFIG_TRACING
17325-extern struct desc_ptr trace_idt_descr;
17326-extern gate_desc trace_idt_table[];
17327+extern const struct desc_ptr trace_idt_descr;
17328+extern gate_desc trace_idt_table[IDT_ENTRIES];
17329 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17330 {
17331 write_idt_entry(trace_idt_table, entry, gate);
17332 }
17333
17334-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
17335+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
17336 unsigned dpl, unsigned ist, unsigned seg)
17337 {
17338 gate_desc s;
17339@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17340 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
17341 #endif
17342
17343-static inline void _set_gate(int gate, unsigned type, void *addr,
17344+static inline void _set_gate(int gate, unsigned type, const void *addr,
17345 unsigned dpl, unsigned ist, unsigned seg)
17346 {
17347 gate_desc s;
17348@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
17349 #define set_intr_gate(n, addr) \
17350 do { \
17351 BUG_ON((unsigned)n > 0xFF); \
17352- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
17353+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
17354 __KERNEL_CS); \
17355- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
17356+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
17357 0, 0, __KERNEL_CS); \
17358 } while (0)
17359
17360@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
17361 /*
17362 * This routine sets up an interrupt gate at directory privilege level 3.
17363 */
17364-static inline void set_system_intr_gate(unsigned int n, void *addr)
17365+static inline void set_system_intr_gate(unsigned int n, const void *addr)
17366 {
17367 BUG_ON((unsigned)n > 0xFF);
17368 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
17369 }
17370
17371-static inline void set_system_trap_gate(unsigned int n, void *addr)
17372+static inline void set_system_trap_gate(unsigned int n, const void *addr)
17373 {
17374 BUG_ON((unsigned)n > 0xFF);
17375 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
17376 }
17377
17378-static inline void set_trap_gate(unsigned int n, void *addr)
17379+static inline void set_trap_gate(unsigned int n, const void *addr)
17380 {
17381 BUG_ON((unsigned)n > 0xFF);
17382 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
17383@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
17384 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
17385 {
17386 BUG_ON((unsigned)n > 0xFF);
17387- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
17388+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
17389 }
17390
17391-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
17392+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
17393 {
17394 BUG_ON((unsigned)n > 0xFF);
17395 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
17396 }
17397
17398-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
17399+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
17400 {
17401 BUG_ON((unsigned)n > 0xFF);
17402 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
17403@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
17404 else
17405 load_idt((const struct desc_ptr *)&idt_descr);
17406 }
17407+
17408+#ifdef CONFIG_X86_32
17409+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
17410+{
17411+ struct desc_struct d;
17412+
17413+ if (likely(limit))
17414+ limit = (limit - 1UL) >> PAGE_SHIFT;
17415+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
17416+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
17417+}
17418+#endif
17419+
17420 #endif /* _ASM_X86_DESC_H */
17421diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
17422index 278441f..b95a174 100644
17423--- a/arch/x86/include/asm/desc_defs.h
17424+++ b/arch/x86/include/asm/desc_defs.h
17425@@ -31,6 +31,12 @@ struct desc_struct {
17426 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
17427 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
17428 };
17429+ struct {
17430+ u16 offset_low;
17431+ u16 seg;
17432+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
17433+ unsigned offset_high: 16;
17434+ } gate;
17435 };
17436 } __attribute__((packed));
17437
17438diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
17439index ced283a..ffe04cc 100644
17440--- a/arch/x86/include/asm/div64.h
17441+++ b/arch/x86/include/asm/div64.h
17442@@ -39,7 +39,7 @@
17443 __mod; \
17444 })
17445
17446-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17447+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17448 {
17449 union {
17450 u64 v64;
17451diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
17452index 1a055c8..1a5082a 100644
17453--- a/arch/x86/include/asm/elf.h
17454+++ b/arch/x86/include/asm/elf.h
17455@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17456
17457 #include <asm/vdso.h>
17458
17459-#ifdef CONFIG_X86_64
17460-extern unsigned int vdso64_enabled;
17461-#endif
17462 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17463 extern unsigned int vdso32_enabled;
17464 #endif
17465@@ -160,8 +157,9 @@ do { \
17466 #define elf_check_arch(x) \
17467 ((x)->e_machine == EM_X86_64)
17468
17469-#define compat_elf_check_arch(x) \
17470- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
17471+#define compat_elf_check_arch(x) \
17472+ (elf_check_arch_ia32(x) || \
17473+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
17474
17475 #if __USER32_DS != __USER_DS
17476 # error "The following code assumes __USER32_DS == __USER_DS"
17477@@ -248,7 +246,25 @@ extern int force_personality32;
17478 the loader. We need to make sure that it is out of the way of the program
17479 that it will "exec", and that there is sufficient room for the brk. */
17480
17481+#ifdef CONFIG_PAX_SEGMEXEC
17482+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17483+#else
17484 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17485+#endif
17486+
17487+#ifdef CONFIG_PAX_ASLR
17488+#ifdef CONFIG_X86_32
17489+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17490+
17491+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17492+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17493+#else
17494+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17495+
17496+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17497+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17498+#endif
17499+#endif
17500
17501 /* This yields a mask that user programs can use to figure out what
17502 instruction set this CPU supports. This could be done in user space,
17503@@ -297,17 +313,13 @@ do { \
17504
17505 #define ARCH_DLINFO \
17506 do { \
17507- if (vdso64_enabled) \
17508- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17509- (unsigned long __force)current->mm->context.vdso); \
17510+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17511 } while (0)
17512
17513 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17514 #define ARCH_DLINFO_X32 \
17515 do { \
17516- if (vdso64_enabled) \
17517- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17518- (unsigned long __force)current->mm->context.vdso); \
17519+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17520 } while (0)
17521
17522 #define AT_SYSINFO 32
17523@@ -322,10 +334,10 @@ else \
17524
17525 #endif /* !CONFIG_X86_32 */
17526
17527-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17528+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17529
17530 #define VDSO_ENTRY \
17531- ((unsigned long)current->mm->context.vdso + \
17532+ (current->mm->context.vdso + \
17533 selected_vdso32->sym___kernel_vsyscall)
17534
17535 struct linux_binprm;
17536@@ -337,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17537 int uses_interp);
17538 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17539
17540-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17541-#define arch_randomize_brk arch_randomize_brk
17542-
17543 /*
17544 * True on X86_32 or when emulating IA32 on X86_64
17545 */
17546diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17547index 77a99ac..39ff7f5 100644
17548--- a/arch/x86/include/asm/emergency-restart.h
17549+++ b/arch/x86/include/asm/emergency-restart.h
17550@@ -1,6 +1,6 @@
17551 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17552 #define _ASM_X86_EMERGENCY_RESTART_H
17553
17554-extern void machine_emergency_restart(void);
17555+extern void machine_emergency_restart(void) __noreturn;
17556
17557 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17558diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17559index 1c7eefe..d0e4702 100644
17560--- a/arch/x86/include/asm/floppy.h
17561+++ b/arch/x86/include/asm/floppy.h
17562@@ -229,18 +229,18 @@ static struct fd_routine_l {
17563 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17564 } fd_routine[] = {
17565 {
17566- request_dma,
17567- free_dma,
17568- get_dma_residue,
17569- dma_mem_alloc,
17570- hard_dma_setup
17571+ ._request_dma = request_dma,
17572+ ._free_dma = free_dma,
17573+ ._get_dma_residue = get_dma_residue,
17574+ ._dma_mem_alloc = dma_mem_alloc,
17575+ ._dma_setup = hard_dma_setup
17576 },
17577 {
17578- vdma_request_dma,
17579- vdma_nop,
17580- vdma_get_dma_residue,
17581- vdma_mem_alloc,
17582- vdma_dma_setup
17583+ ._request_dma = vdma_request_dma,
17584+ ._free_dma = vdma_nop,
17585+ ._get_dma_residue = vdma_get_dma_residue,
17586+ ._dma_mem_alloc = vdma_mem_alloc,
17587+ ._dma_setup = vdma_dma_setup
17588 }
17589 };
17590
17591diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17592index 412ecec..c1ea43a 100644
17593--- a/arch/x86/include/asm/fpu-internal.h
17594+++ b/arch/x86/include/asm/fpu-internal.h
17595@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17596 #define user_insn(insn, output, input...) \
17597 ({ \
17598 int err; \
17599+ pax_open_userland(); \
17600 asm volatile(ASM_STAC "\n" \
17601- "1:" #insn "\n\t" \
17602+ "1:" \
17603+ __copyuser_seg \
17604+ #insn "\n\t" \
17605 "2: " ASM_CLAC "\n" \
17606 ".section .fixup,\"ax\"\n" \
17607 "3: movl $-1,%[err]\n" \
17608@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17609 _ASM_EXTABLE(1b, 3b) \
17610 : [err] "=r" (err), output \
17611 : "0"(0), input); \
17612+ pax_close_userland(); \
17613 err; \
17614 })
17615
17616@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17617 "fnclex\n\t"
17618 "emms\n\t"
17619 "fildl %P[addr]" /* set F?P to defined value */
17620- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17621+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17622 }
17623
17624 return fpu_restore_checking(&tsk->thread.fpu);
17625diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17626index b4c1f54..e290c08 100644
17627--- a/arch/x86/include/asm/futex.h
17628+++ b/arch/x86/include/asm/futex.h
17629@@ -12,6 +12,7 @@
17630 #include <asm/smap.h>
17631
17632 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17633+ typecheck(u32 __user *, uaddr); \
17634 asm volatile("\t" ASM_STAC "\n" \
17635 "1:\t" insn "\n" \
17636 "2:\t" ASM_CLAC "\n" \
17637@@ -20,15 +21,16 @@
17638 "\tjmp\t2b\n" \
17639 "\t.previous\n" \
17640 _ASM_EXTABLE(1b, 3b) \
17641- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17642+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17643 : "i" (-EFAULT), "0" (oparg), "1" (0))
17644
17645 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17646+ typecheck(u32 __user *, uaddr); \
17647 asm volatile("\t" ASM_STAC "\n" \
17648 "1:\tmovl %2, %0\n" \
17649 "\tmovl\t%0, %3\n" \
17650 "\t" insn "\n" \
17651- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17652+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17653 "\tjnz\t1b\n" \
17654 "3:\t" ASM_CLAC "\n" \
17655 "\t.section .fixup,\"ax\"\n" \
17656@@ -38,7 +40,7 @@
17657 _ASM_EXTABLE(1b, 4b) \
17658 _ASM_EXTABLE(2b, 4b) \
17659 : "=&a" (oldval), "=&r" (ret), \
17660- "+m" (*uaddr), "=&r" (tem) \
17661+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17662 : "r" (oparg), "i" (-EFAULT), "1" (0))
17663
17664 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17665@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17666
17667 pagefault_disable();
17668
17669+ pax_open_userland();
17670 switch (op) {
17671 case FUTEX_OP_SET:
17672- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17673+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17674 break;
17675 case FUTEX_OP_ADD:
17676- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17677+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17678 uaddr, oparg);
17679 break;
17680 case FUTEX_OP_OR:
17681@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17682 default:
17683 ret = -ENOSYS;
17684 }
17685+ pax_close_userland();
17686
17687 pagefault_enable();
17688
17689diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17690index 4615906..788c817 100644
17691--- a/arch/x86/include/asm/hw_irq.h
17692+++ b/arch/x86/include/asm/hw_irq.h
17693@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17694 extern void enable_IO_APIC(void);
17695
17696 /* Statistics */
17697-extern atomic_t irq_err_count;
17698-extern atomic_t irq_mis_count;
17699+extern atomic_unchecked_t irq_err_count;
17700+extern atomic_unchecked_t irq_mis_count;
17701
17702 /* EISA */
17703 extern void eisa_set_level_irq(unsigned int irq);
17704diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17705index ccffa53..3c90c87 100644
17706--- a/arch/x86/include/asm/i8259.h
17707+++ b/arch/x86/include/asm/i8259.h
17708@@ -62,7 +62,7 @@ struct legacy_pic {
17709 void (*init)(int auto_eoi);
17710 int (*irq_pending)(unsigned int irq);
17711 void (*make_irq)(unsigned int irq);
17712-};
17713+} __do_const;
17714
17715 extern struct legacy_pic *legacy_pic;
17716 extern struct legacy_pic null_legacy_pic;
17717diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17718index b8237d8..3e8864e 100644
17719--- a/arch/x86/include/asm/io.h
17720+++ b/arch/x86/include/asm/io.h
17721@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17722 "m" (*(volatile type __force *)addr) barrier); }
17723
17724 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17725-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17726-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17727+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17728+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17729
17730 build_mmio_read(__readb, "b", unsigned char, "=q", )
17731-build_mmio_read(__readw, "w", unsigned short, "=r", )
17732-build_mmio_read(__readl, "l", unsigned int, "=r", )
17733+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17734+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17735
17736 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17737 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17738@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17739 * this function
17740 */
17741
17742-static inline phys_addr_t virt_to_phys(volatile void *address)
17743+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17744 {
17745 return __pa(address);
17746 }
17747@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17748 return ioremap_nocache(offset, size);
17749 }
17750
17751-extern void iounmap(volatile void __iomem *addr);
17752+extern void iounmap(const volatile void __iomem *addr);
17753
17754 extern void set_iounmap_nonlazy(void);
17755
17756@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17757
17758 #include <linux/vmalloc.h>
17759
17760+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17761+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17762+{
17763+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17764+}
17765+
17766+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17767+{
17768+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17769+}
17770+
17771 /*
17772 * Convert a virtual cached pointer to an uncached pointer
17773 */
17774diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17775index 0a8b519..80e7d5b 100644
17776--- a/arch/x86/include/asm/irqflags.h
17777+++ b/arch/x86/include/asm/irqflags.h
17778@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17779 sti; \
17780 sysexit
17781
17782+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17783+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17784+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17785+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17786+
17787 #else
17788 #define INTERRUPT_RETURN iret
17789 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17790diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17791index 53cdfb2..d1369e6 100644
17792--- a/arch/x86/include/asm/kprobes.h
17793+++ b/arch/x86/include/asm/kprobes.h
17794@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17795 #define RELATIVEJUMP_SIZE 5
17796 #define RELATIVECALL_OPCODE 0xe8
17797 #define RELATIVE_ADDR_SIZE 4
17798-#define MAX_STACK_SIZE 64
17799-#define MIN_STACK_SIZE(ADDR) \
17800- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17801- THREAD_SIZE - (unsigned long)(ADDR))) \
17802- ? (MAX_STACK_SIZE) \
17803- : (((unsigned long)current_thread_info()) + \
17804- THREAD_SIZE - (unsigned long)(ADDR)))
17805+#define MAX_STACK_SIZE 64UL
17806+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17807
17808 #define flush_insn_slot(p) do { } while (0)
17809
17810diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17811index 92d3486..0d47ae1 100644
17812--- a/arch/x86/include/asm/kvm_host.h
17813+++ b/arch/x86/include/asm/kvm_host.h
17814@@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
17815 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
17816 }
17817
17818+static inline u64 get_canonical(u64 la)
17819+{
17820+ return ((int64_t)la << 16) >> 16;
17821+}
17822+
17823+static inline bool is_noncanonical_address(u64 la)
17824+{
17825+#ifdef CONFIG_X86_64
17826+ return get_canonical(la) != la;
17827+#else
17828+ return false;
17829+#endif
17830+}
17831+
17832 #define TSS_IOPB_BASE_OFFSET 0x66
17833 #define TSS_BASE_SIZE 0x68
17834 #define TSS_IOPB_SIZE (65536 / 8)
17835@@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
17836 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
17837
17838 void kvm_define_shared_msr(unsigned index, u32 msr);
17839-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
17840+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
17841
17842 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
17843
17844diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17845index 4ad6560..75c7bdd 100644
17846--- a/arch/x86/include/asm/local.h
17847+++ b/arch/x86/include/asm/local.h
17848@@ -10,33 +10,97 @@ typedef struct {
17849 atomic_long_t a;
17850 } local_t;
17851
17852+typedef struct {
17853+ atomic_long_unchecked_t a;
17854+} local_unchecked_t;
17855+
17856 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17857
17858 #define local_read(l) atomic_long_read(&(l)->a)
17859+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17860 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17861+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17862
17863 static inline void local_inc(local_t *l)
17864 {
17865- asm volatile(_ASM_INC "%0"
17866+ asm volatile(_ASM_INC "%0\n"
17867+
17868+#ifdef CONFIG_PAX_REFCOUNT
17869+ "jno 0f\n"
17870+ _ASM_DEC "%0\n"
17871+ "int $4\n0:\n"
17872+ _ASM_EXTABLE(0b, 0b)
17873+#endif
17874+
17875+ : "+m" (l->a.counter));
17876+}
17877+
17878+static inline void local_inc_unchecked(local_unchecked_t *l)
17879+{
17880+ asm volatile(_ASM_INC "%0\n"
17881 : "+m" (l->a.counter));
17882 }
17883
17884 static inline void local_dec(local_t *l)
17885 {
17886- asm volatile(_ASM_DEC "%0"
17887+ asm volatile(_ASM_DEC "%0\n"
17888+
17889+#ifdef CONFIG_PAX_REFCOUNT
17890+ "jno 0f\n"
17891+ _ASM_INC "%0\n"
17892+ "int $4\n0:\n"
17893+ _ASM_EXTABLE(0b, 0b)
17894+#endif
17895+
17896+ : "+m" (l->a.counter));
17897+}
17898+
17899+static inline void local_dec_unchecked(local_unchecked_t *l)
17900+{
17901+ asm volatile(_ASM_DEC "%0\n"
17902 : "+m" (l->a.counter));
17903 }
17904
17905 static inline void local_add(long i, local_t *l)
17906 {
17907- asm volatile(_ASM_ADD "%1,%0"
17908+ asm volatile(_ASM_ADD "%1,%0\n"
17909+
17910+#ifdef CONFIG_PAX_REFCOUNT
17911+ "jno 0f\n"
17912+ _ASM_SUB "%1,%0\n"
17913+ "int $4\n0:\n"
17914+ _ASM_EXTABLE(0b, 0b)
17915+#endif
17916+
17917+ : "+m" (l->a.counter)
17918+ : "ir" (i));
17919+}
17920+
17921+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17922+{
17923+ asm volatile(_ASM_ADD "%1,%0\n"
17924 : "+m" (l->a.counter)
17925 : "ir" (i));
17926 }
17927
17928 static inline void local_sub(long i, local_t *l)
17929 {
17930- asm volatile(_ASM_SUB "%1,%0"
17931+ asm volatile(_ASM_SUB "%1,%0\n"
17932+
17933+#ifdef CONFIG_PAX_REFCOUNT
17934+ "jno 0f\n"
17935+ _ASM_ADD "%1,%0\n"
17936+ "int $4\n0:\n"
17937+ _ASM_EXTABLE(0b, 0b)
17938+#endif
17939+
17940+ : "+m" (l->a.counter)
17941+ : "ir" (i));
17942+}
17943+
17944+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17945+{
17946+ asm volatile(_ASM_SUB "%1,%0\n"
17947 : "+m" (l->a.counter)
17948 : "ir" (i));
17949 }
17950@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17951 */
17952 static inline int local_sub_and_test(long i, local_t *l)
17953 {
17954- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17955+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17956 }
17957
17958 /**
17959@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17960 */
17961 static inline int local_dec_and_test(local_t *l)
17962 {
17963- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17964+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17965 }
17966
17967 /**
17968@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17969 */
17970 static inline int local_inc_and_test(local_t *l)
17971 {
17972- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17973+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17974 }
17975
17976 /**
17977@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17978 */
17979 static inline int local_add_negative(long i, local_t *l)
17980 {
17981- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17982+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17983 }
17984
17985 /**
17986@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17987 static inline long local_add_return(long i, local_t *l)
17988 {
17989 long __i = i;
17990+ asm volatile(_ASM_XADD "%0, %1\n"
17991+
17992+#ifdef CONFIG_PAX_REFCOUNT
17993+ "jno 0f\n"
17994+ _ASM_MOV "%0,%1\n"
17995+ "int $4\n0:\n"
17996+ _ASM_EXTABLE(0b, 0b)
17997+#endif
17998+
17999+ : "+r" (i), "+m" (l->a.counter)
18000+ : : "memory");
18001+ return i + __i;
18002+}
18003+
18004+/**
18005+ * local_add_return_unchecked - add and return
18006+ * @i: integer value to add
18007+ * @l: pointer to type local_unchecked_t
18008+ *
18009+ * Atomically adds @i to @l and returns @i + @l
18010+ */
18011+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
18012+{
18013+ long __i = i;
18014 asm volatile(_ASM_XADD "%0, %1;"
18015 : "+r" (i), "+m" (l->a.counter)
18016 : : "memory");
18017@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
18018
18019 #define local_cmpxchg(l, o, n) \
18020 (cmpxchg_local(&((l)->a.counter), (o), (n)))
18021+#define local_cmpxchg_unchecked(l, o, n) \
18022+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
18023 /* Always has a lock prefix */
18024 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
18025
18026diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
18027new file mode 100644
18028index 0000000..2bfd3ba
18029--- /dev/null
18030+++ b/arch/x86/include/asm/mman.h
18031@@ -0,0 +1,15 @@
18032+#ifndef _X86_MMAN_H
18033+#define _X86_MMAN_H
18034+
18035+#include <uapi/asm/mman.h>
18036+
18037+#ifdef __KERNEL__
18038+#ifndef __ASSEMBLY__
18039+#ifdef CONFIG_X86_32
18040+#define arch_mmap_check i386_mmap_check
18041+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
18042+#endif
18043+#endif
18044+#endif
18045+
18046+#endif /* X86_MMAN_H */
18047diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
18048index 876e74e..e20bfb1 100644
18049--- a/arch/x86/include/asm/mmu.h
18050+++ b/arch/x86/include/asm/mmu.h
18051@@ -9,7 +9,7 @@
18052 * we put the segment information here.
18053 */
18054 typedef struct {
18055- void *ldt;
18056+ struct desc_struct *ldt;
18057 int size;
18058
18059 #ifdef CONFIG_X86_64
18060@@ -18,7 +18,19 @@ typedef struct {
18061 #endif
18062
18063 struct mutex lock;
18064- void __user *vdso;
18065+ unsigned long vdso;
18066+
18067+#ifdef CONFIG_X86_32
18068+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18069+ unsigned long user_cs_base;
18070+ unsigned long user_cs_limit;
18071+
18072+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18073+ cpumask_t cpu_user_cs_mask;
18074+#endif
18075+
18076+#endif
18077+#endif
18078 } mm_context_t;
18079
18080 #ifdef CONFIG_SMP
18081diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
18082index 166af2a..648c200 100644
18083--- a/arch/x86/include/asm/mmu_context.h
18084+++ b/arch/x86/include/asm/mmu_context.h
18085@@ -28,6 +28,20 @@ void destroy_context(struct mm_struct *mm);
18086
18087 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
18088 {
18089+
18090+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18091+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
18092+ unsigned int i;
18093+ pgd_t *pgd;
18094+
18095+ pax_open_kernel();
18096+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
18097+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
18098+ set_pgd_batched(pgd+i, native_make_pgd(0));
18099+ pax_close_kernel();
18100+ }
18101+#endif
18102+
18103 #ifdef CONFIG_SMP
18104 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
18105 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
18106@@ -38,16 +52,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18107 struct task_struct *tsk)
18108 {
18109 unsigned cpu = smp_processor_id();
18110+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18111+ int tlbstate = TLBSTATE_OK;
18112+#endif
18113
18114 if (likely(prev != next)) {
18115 #ifdef CONFIG_SMP
18116+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18117+ tlbstate = this_cpu_read(cpu_tlbstate.state);
18118+#endif
18119 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18120 this_cpu_write(cpu_tlbstate.active_mm, next);
18121 #endif
18122 cpumask_set_cpu(cpu, mm_cpumask(next));
18123
18124 /* Re-load page tables */
18125+#ifdef CONFIG_PAX_PER_CPU_PGD
18126+ pax_open_kernel();
18127+
18128+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18129+ if (static_cpu_has(X86_FEATURE_PCID))
18130+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18131+ else
18132+#endif
18133+
18134+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18135+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18136+ pax_close_kernel();
18137+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18138+
18139+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18140+ if (static_cpu_has(X86_FEATURE_PCID)) {
18141+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18142+ u64 descriptor[2];
18143+ descriptor[0] = PCID_USER;
18144+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18145+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18146+ descriptor[0] = PCID_KERNEL;
18147+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18148+ }
18149+ } else {
18150+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18151+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18152+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18153+ else
18154+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18155+ }
18156+ } else
18157+#endif
18158+
18159+ load_cr3(get_cpu_pgd(cpu, kernel));
18160+#else
18161 load_cr3(next->pgd);
18162+#endif
18163 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18164
18165 /* Stop flush ipis for the previous mm */
18166@@ -56,9 +113,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18167 /* Load the LDT, if the LDT is different: */
18168 if (unlikely(prev->context.ldt != next->context.ldt))
18169 load_LDT_nolock(&next->context);
18170+
18171+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18172+ if (!(__supported_pte_mask & _PAGE_NX)) {
18173+ smp_mb__before_atomic();
18174+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
18175+ smp_mb__after_atomic();
18176+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18177+ }
18178+#endif
18179+
18180+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18181+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
18182+ prev->context.user_cs_limit != next->context.user_cs_limit))
18183+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18184+#ifdef CONFIG_SMP
18185+ else if (unlikely(tlbstate != TLBSTATE_OK))
18186+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18187+#endif
18188+#endif
18189+
18190 }
18191+ else {
18192+
18193+#ifdef CONFIG_PAX_PER_CPU_PGD
18194+ pax_open_kernel();
18195+
18196+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18197+ if (static_cpu_has(X86_FEATURE_PCID))
18198+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18199+ else
18200+#endif
18201+
18202+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18203+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18204+ pax_close_kernel();
18205+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18206+
18207+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18208+ if (static_cpu_has(X86_FEATURE_PCID)) {
18209+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18210+ u64 descriptor[2];
18211+ descriptor[0] = PCID_USER;
18212+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18213+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18214+ descriptor[0] = PCID_KERNEL;
18215+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18216+ }
18217+ } else {
18218+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18219+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18220+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18221+ else
18222+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18223+ }
18224+ } else
18225+#endif
18226+
18227+ load_cr3(get_cpu_pgd(cpu, kernel));
18228+#endif
18229+
18230 #ifdef CONFIG_SMP
18231- else {
18232 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18233 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
18234
18235@@ -75,12 +190,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18236 * tlb flush IPI delivery. We must reload CR3
18237 * to make sure to use no freed page tables.
18238 */
18239+
18240+#ifndef CONFIG_PAX_PER_CPU_PGD
18241 load_cr3(next->pgd);
18242 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18243+#endif
18244+
18245 load_LDT_nolock(&next->context);
18246+
18247+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18248+ if (!(__supported_pte_mask & _PAGE_NX))
18249+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18250+#endif
18251+
18252+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18253+#ifdef CONFIG_PAX_PAGEEXEC
18254+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
18255+#endif
18256+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18257+#endif
18258+
18259 }
18260+#endif
18261 }
18262-#endif
18263 }
18264
18265 #define activate_mm(prev, next) \
18266diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
18267index e3b7819..b257c64 100644
18268--- a/arch/x86/include/asm/module.h
18269+++ b/arch/x86/include/asm/module.h
18270@@ -5,6 +5,7 @@
18271
18272 #ifdef CONFIG_X86_64
18273 /* X86_64 does not define MODULE_PROC_FAMILY */
18274+#define MODULE_PROC_FAMILY ""
18275 #elif defined CONFIG_M486
18276 #define MODULE_PROC_FAMILY "486 "
18277 #elif defined CONFIG_M586
18278@@ -57,8 +58,20 @@
18279 #error unknown processor family
18280 #endif
18281
18282-#ifdef CONFIG_X86_32
18283-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
18284+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
18285+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
18286+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
18287+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
18288+#else
18289+#define MODULE_PAX_KERNEXEC ""
18290 #endif
18291
18292+#ifdef CONFIG_PAX_MEMORY_UDEREF
18293+#define MODULE_PAX_UDEREF "UDEREF "
18294+#else
18295+#define MODULE_PAX_UDEREF ""
18296+#endif
18297+
18298+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
18299+
18300 #endif /* _ASM_X86_MODULE_H */
18301diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
18302index 5f2fc44..106caa6 100644
18303--- a/arch/x86/include/asm/nmi.h
18304+++ b/arch/x86/include/asm/nmi.h
18305@@ -36,26 +36,35 @@ enum {
18306
18307 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
18308
18309+struct nmiaction;
18310+
18311+struct nmiwork {
18312+ const struct nmiaction *action;
18313+ u64 max_duration;
18314+ struct irq_work irq_work;
18315+};
18316+
18317 struct nmiaction {
18318 struct list_head list;
18319 nmi_handler_t handler;
18320- u64 max_duration;
18321- struct irq_work irq_work;
18322 unsigned long flags;
18323 const char *name;
18324-};
18325+ struct nmiwork *work;
18326+} __do_const;
18327
18328 #define register_nmi_handler(t, fn, fg, n, init...) \
18329 ({ \
18330- static struct nmiaction init fn##_na = { \
18331+ static struct nmiwork fn##_nw; \
18332+ static const struct nmiaction init fn##_na = { \
18333 .handler = (fn), \
18334 .name = (n), \
18335 .flags = (fg), \
18336+ .work = &fn##_nw, \
18337 }; \
18338 __register_nmi_handler((t), &fn##_na); \
18339 })
18340
18341-int __register_nmi_handler(unsigned int, struct nmiaction *);
18342+int __register_nmi_handler(unsigned int, const struct nmiaction *);
18343
18344 void unregister_nmi_handler(unsigned int, const char *);
18345
18346diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
18347index 802dde3..9183e68 100644
18348--- a/arch/x86/include/asm/page.h
18349+++ b/arch/x86/include/asm/page.h
18350@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18351 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
18352
18353 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
18354+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
18355
18356 #define __boot_va(x) __va(x)
18357 #define __boot_pa(x) __pa(x)
18358@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18359 * virt_to_page(kaddr) returns a valid pointer if and only if
18360 * virt_addr_valid(kaddr) returns true.
18361 */
18362-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18363 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
18364 extern bool __virt_addr_valid(unsigned long kaddr);
18365 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
18366
18367+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
18368+#define virt_to_page(kaddr) \
18369+ ({ \
18370+ const void *__kaddr = (const void *)(kaddr); \
18371+ BUG_ON(!virt_addr_valid(__kaddr)); \
18372+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
18373+ })
18374+#else
18375+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18376+#endif
18377+
18378 #endif /* __ASSEMBLY__ */
18379
18380 #include <asm-generic/memory_model.h>
18381diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
18382index f408caf..4a0455e 100644
18383--- a/arch/x86/include/asm/page_64.h
18384+++ b/arch/x86/include/asm/page_64.h
18385@@ -7,9 +7,9 @@
18386
18387 /* duplicated to the one in bootmem.h */
18388 extern unsigned long max_pfn;
18389-extern unsigned long phys_base;
18390+extern const unsigned long phys_base;
18391
18392-static inline unsigned long __phys_addr_nodebug(unsigned long x)
18393+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
18394 {
18395 unsigned long y = x - __START_KERNEL_map;
18396
18397diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
18398index cd6e1610..70f4418 100644
18399--- a/arch/x86/include/asm/paravirt.h
18400+++ b/arch/x86/include/asm/paravirt.h
18401@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
18402 return (pmd_t) { ret };
18403 }
18404
18405-static inline pmdval_t pmd_val(pmd_t pmd)
18406+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
18407 {
18408 pmdval_t ret;
18409
18410@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
18411 val);
18412 }
18413
18414+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18415+{
18416+ pgdval_t val = native_pgd_val(pgd);
18417+
18418+ if (sizeof(pgdval_t) > sizeof(long))
18419+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
18420+ val, (u64)val >> 32);
18421+ else
18422+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
18423+ val);
18424+}
18425+
18426 static inline void pgd_clear(pgd_t *pgdp)
18427 {
18428 set_pgd(pgdp, __pgd(0));
18429@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
18430 pv_mmu_ops.set_fixmap(idx, phys, flags);
18431 }
18432
18433+#ifdef CONFIG_PAX_KERNEXEC
18434+static inline unsigned long pax_open_kernel(void)
18435+{
18436+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
18437+}
18438+
18439+static inline unsigned long pax_close_kernel(void)
18440+{
18441+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
18442+}
18443+#else
18444+static inline unsigned long pax_open_kernel(void) { return 0; }
18445+static inline unsigned long pax_close_kernel(void) { return 0; }
18446+#endif
18447+
18448 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
18449
18450 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
18451@@ -906,7 +933,7 @@ extern void default_banner(void);
18452
18453 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
18454 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
18455-#define PARA_INDIRECT(addr) *%cs:addr
18456+#define PARA_INDIRECT(addr) *%ss:addr
18457 #endif
18458
18459 #define INTERRUPT_RETURN \
18460@@ -981,6 +1008,21 @@ extern void default_banner(void);
18461 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
18462 CLBR_NONE, \
18463 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
18464+
18465+#define GET_CR0_INTO_RDI \
18466+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
18467+ mov %rax,%rdi
18468+
18469+#define SET_RDI_INTO_CR0 \
18470+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18471+
18472+#define GET_CR3_INTO_RDI \
18473+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
18474+ mov %rax,%rdi
18475+
18476+#define SET_RDI_INTO_CR3 \
18477+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
18478+
18479 #endif /* CONFIG_X86_32 */
18480
18481 #endif /* __ASSEMBLY__ */
18482diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
18483index 7549b8b..f0edfda 100644
18484--- a/arch/x86/include/asm/paravirt_types.h
18485+++ b/arch/x86/include/asm/paravirt_types.h
18486@@ -84,7 +84,7 @@ struct pv_init_ops {
18487 */
18488 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
18489 unsigned long addr, unsigned len);
18490-};
18491+} __no_const __no_randomize_layout;
18492
18493
18494 struct pv_lazy_ops {
18495@@ -92,13 +92,13 @@ struct pv_lazy_ops {
18496 void (*enter)(void);
18497 void (*leave)(void);
18498 void (*flush)(void);
18499-};
18500+} __no_randomize_layout;
18501
18502 struct pv_time_ops {
18503 unsigned long long (*sched_clock)(void);
18504 unsigned long long (*steal_clock)(int cpu);
18505 unsigned long (*get_tsc_khz)(void);
18506-};
18507+} __no_const __no_randomize_layout;
18508
18509 struct pv_cpu_ops {
18510 /* hooks for various privileged instructions */
18511@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18512
18513 void (*start_context_switch)(struct task_struct *prev);
18514 void (*end_context_switch)(struct task_struct *next);
18515-};
18516+} __no_const __no_randomize_layout;
18517
18518 struct pv_irq_ops {
18519 /*
18520@@ -215,7 +215,7 @@ struct pv_irq_ops {
18521 #ifdef CONFIG_X86_64
18522 void (*adjust_exception_frame)(void);
18523 #endif
18524-};
18525+} __no_randomize_layout;
18526
18527 struct pv_apic_ops {
18528 #ifdef CONFIG_X86_LOCAL_APIC
18529@@ -223,7 +223,7 @@ struct pv_apic_ops {
18530 unsigned long start_eip,
18531 unsigned long start_esp);
18532 #endif
18533-};
18534+} __no_const __no_randomize_layout;
18535
18536 struct pv_mmu_ops {
18537 unsigned long (*read_cr2)(void);
18538@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18539 struct paravirt_callee_save make_pud;
18540
18541 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18542+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18543 #endif /* PAGETABLE_LEVELS == 4 */
18544 #endif /* PAGETABLE_LEVELS >= 3 */
18545
18546@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18547 an mfn. We can tell which is which from the index. */
18548 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18549 phys_addr_t phys, pgprot_t flags);
18550-};
18551+
18552+#ifdef CONFIG_PAX_KERNEXEC
18553+ unsigned long (*pax_open_kernel)(void);
18554+ unsigned long (*pax_close_kernel)(void);
18555+#endif
18556+
18557+} __no_randomize_layout;
18558
18559 struct arch_spinlock;
18560 #ifdef CONFIG_SMP
18561@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18562 struct pv_lock_ops {
18563 struct paravirt_callee_save lock_spinning;
18564 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18565-};
18566+} __no_randomize_layout;
18567
18568 /* This contains all the paravirt structures: we get a convenient
18569 * number for each function using the offset which we use to indicate
18570- * what to patch. */
18571+ * what to patch.
18572+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18573+ */
18574+
18575 struct paravirt_patch_template {
18576 struct pv_init_ops pv_init_ops;
18577 struct pv_time_ops pv_time_ops;
18578@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18579 struct pv_apic_ops pv_apic_ops;
18580 struct pv_mmu_ops pv_mmu_ops;
18581 struct pv_lock_ops pv_lock_ops;
18582-};
18583+} __no_randomize_layout;
18584
18585 extern struct pv_info pv_info;
18586 extern struct pv_init_ops pv_init_ops;
18587diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18588index c4412e9..90e88c5 100644
18589--- a/arch/x86/include/asm/pgalloc.h
18590+++ b/arch/x86/include/asm/pgalloc.h
18591@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18592 pmd_t *pmd, pte_t *pte)
18593 {
18594 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18595+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18596+}
18597+
18598+static inline void pmd_populate_user(struct mm_struct *mm,
18599+ pmd_t *pmd, pte_t *pte)
18600+{
18601+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18602 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18603 }
18604
18605@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18606
18607 #ifdef CONFIG_X86_PAE
18608 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18609+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18610+{
18611+ pud_populate(mm, pudp, pmd);
18612+}
18613 #else /* !CONFIG_X86_PAE */
18614 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18615 {
18616 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18617 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18618 }
18619+
18620+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18621+{
18622+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18623+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18624+}
18625 #endif /* CONFIG_X86_PAE */
18626
18627 #if PAGETABLE_LEVELS > 3
18628@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18629 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18630 }
18631
18632+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18633+{
18634+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18635+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18636+}
18637+
18638 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18639 {
18640 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18641diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18642index 206a87f..1623b06 100644
18643--- a/arch/x86/include/asm/pgtable-2level.h
18644+++ b/arch/x86/include/asm/pgtable-2level.h
18645@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18646
18647 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18648 {
18649+ pax_open_kernel();
18650 *pmdp = pmd;
18651+ pax_close_kernel();
18652 }
18653
18654 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18655diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18656index 81bb91b..9392125 100644
18657--- a/arch/x86/include/asm/pgtable-3level.h
18658+++ b/arch/x86/include/asm/pgtable-3level.h
18659@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18660
18661 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18662 {
18663+ pax_open_kernel();
18664 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18665+ pax_close_kernel();
18666 }
18667
18668 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18669 {
18670+ pax_open_kernel();
18671 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18672+ pax_close_kernel();
18673 }
18674
18675 /*
18676diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18677index aa97a07..5c53c32 100644
18678--- a/arch/x86/include/asm/pgtable.h
18679+++ b/arch/x86/include/asm/pgtable.h
18680@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18681
18682 #ifndef __PAGETABLE_PUD_FOLDED
18683 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18684+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18685 #define pgd_clear(pgd) native_pgd_clear(pgd)
18686 #endif
18687
18688@@ -83,12 +84,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18689
18690 #define arch_end_context_switch(prev) do {} while(0)
18691
18692+#define pax_open_kernel() native_pax_open_kernel()
18693+#define pax_close_kernel() native_pax_close_kernel()
18694 #endif /* CONFIG_PARAVIRT */
18695
18696+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18697+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18698+
18699+#ifdef CONFIG_PAX_KERNEXEC
18700+static inline unsigned long native_pax_open_kernel(void)
18701+{
18702+ unsigned long cr0;
18703+
18704+ preempt_disable();
18705+ barrier();
18706+ cr0 = read_cr0() ^ X86_CR0_WP;
18707+ BUG_ON(cr0 & X86_CR0_WP);
18708+ write_cr0(cr0);
18709+ barrier();
18710+ return cr0 ^ X86_CR0_WP;
18711+}
18712+
18713+static inline unsigned long native_pax_close_kernel(void)
18714+{
18715+ unsigned long cr0;
18716+
18717+ barrier();
18718+ cr0 = read_cr0() ^ X86_CR0_WP;
18719+ BUG_ON(!(cr0 & X86_CR0_WP));
18720+ write_cr0(cr0);
18721+ barrier();
18722+ preempt_enable_no_resched();
18723+ return cr0 ^ X86_CR0_WP;
18724+}
18725+#else
18726+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18727+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18728+#endif
18729+
18730 /*
18731 * The following only work if pte_present() is true.
18732 * Undefined behaviour if not..
18733 */
18734+static inline int pte_user(pte_t pte)
18735+{
18736+ return pte_val(pte) & _PAGE_USER;
18737+}
18738+
18739 static inline int pte_dirty(pte_t pte)
18740 {
18741 return pte_flags(pte) & _PAGE_DIRTY;
18742@@ -155,6 +197,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18743 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18744 }
18745
18746+static inline unsigned long pgd_pfn(pgd_t pgd)
18747+{
18748+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18749+}
18750+
18751 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18752
18753 static inline int pmd_large(pmd_t pte)
18754@@ -208,9 +255,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18755 return pte_clear_flags(pte, _PAGE_RW);
18756 }
18757
18758+static inline pte_t pte_mkread(pte_t pte)
18759+{
18760+ return __pte(pte_val(pte) | _PAGE_USER);
18761+}
18762+
18763 static inline pte_t pte_mkexec(pte_t pte)
18764 {
18765- return pte_clear_flags(pte, _PAGE_NX);
18766+#ifdef CONFIG_X86_PAE
18767+ if (__supported_pte_mask & _PAGE_NX)
18768+ return pte_clear_flags(pte, _PAGE_NX);
18769+ else
18770+#endif
18771+ return pte_set_flags(pte, _PAGE_USER);
18772+}
18773+
18774+static inline pte_t pte_exprotect(pte_t pte)
18775+{
18776+#ifdef CONFIG_X86_PAE
18777+ if (__supported_pte_mask & _PAGE_NX)
18778+ return pte_set_flags(pte, _PAGE_NX);
18779+ else
18780+#endif
18781+ return pte_clear_flags(pte, _PAGE_USER);
18782 }
18783
18784 static inline pte_t pte_mkdirty(pte_t pte)
18785@@ -440,6 +507,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18786 #endif
18787
18788 #ifndef __ASSEMBLY__
18789+
18790+#ifdef CONFIG_PAX_PER_CPU_PGD
18791+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18792+enum cpu_pgd_type {kernel = 0, user = 1};
18793+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18794+{
18795+ return cpu_pgd[cpu][type];
18796+}
18797+#endif
18798+
18799 #include <linux/mm_types.h>
18800 #include <linux/mmdebug.h>
18801 #include <linux/log2.h>
18802@@ -586,7 +663,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18803 * Currently stuck as a macro due to indirect forward reference to
18804 * linux/mmzone.h's __section_mem_map_addr() definition:
18805 */
18806-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18807+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18808
18809 /* Find an entry in the second-level page table.. */
18810 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18811@@ -626,7 +703,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18812 * Currently stuck as a macro due to indirect forward reference to
18813 * linux/mmzone.h's __section_mem_map_addr() definition:
18814 */
18815-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18816+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18817
18818 /* to find an entry in a page-table-directory. */
18819 static inline unsigned long pud_index(unsigned long address)
18820@@ -641,7 +718,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18821
18822 static inline int pgd_bad(pgd_t pgd)
18823 {
18824- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18825+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18826 }
18827
18828 static inline int pgd_none(pgd_t pgd)
18829@@ -664,7 +741,12 @@ static inline int pgd_none(pgd_t pgd)
18830 * pgd_offset() returns a (pgd_t *)
18831 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18832 */
18833-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18834+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18835+
18836+#ifdef CONFIG_PAX_PER_CPU_PGD
18837+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18838+#endif
18839+
18840 /*
18841 * a shortcut which implies the use of the kernel's pgd, instead
18842 * of a process's
18843@@ -675,6 +757,23 @@ static inline int pgd_none(pgd_t pgd)
18844 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18845 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18846
18847+#ifdef CONFIG_X86_32
18848+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18849+#else
18850+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18851+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18852+
18853+#ifdef CONFIG_PAX_MEMORY_UDEREF
18854+#ifdef __ASSEMBLY__
18855+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18856+#else
18857+extern unsigned long pax_user_shadow_base;
18858+extern pgdval_t clone_pgd_mask;
18859+#endif
18860+#endif
18861+
18862+#endif
18863+
18864 #ifndef __ASSEMBLY__
18865
18866 extern int direct_gbpages;
18867@@ -841,11 +940,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18868 * dst and src can be on the same page, but the range must not overlap,
18869 * and must not cross a page boundary.
18870 */
18871-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18872+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18873 {
18874- memcpy(dst, src, count * sizeof(pgd_t));
18875+ pax_open_kernel();
18876+ while (count--)
18877+ *dst++ = *src++;
18878+ pax_close_kernel();
18879 }
18880
18881+#ifdef CONFIG_PAX_PER_CPU_PGD
18882+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18883+#endif
18884+
18885+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18886+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18887+#else
18888+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18889+#endif
18890+
18891 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18892 static inline int page_level_shift(enum pg_level level)
18893 {
18894diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18895index 9ee3221..b979c6b 100644
18896--- a/arch/x86/include/asm/pgtable_32.h
18897+++ b/arch/x86/include/asm/pgtable_32.h
18898@@ -25,9 +25,6 @@
18899 struct mm_struct;
18900 struct vm_area_struct;
18901
18902-extern pgd_t swapper_pg_dir[1024];
18903-extern pgd_t initial_page_table[1024];
18904-
18905 static inline void pgtable_cache_init(void) { }
18906 static inline void check_pgt_cache(void) { }
18907 void paging_init(void);
18908@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18909 # include <asm/pgtable-2level.h>
18910 #endif
18911
18912+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18913+extern pgd_t initial_page_table[PTRS_PER_PGD];
18914+#ifdef CONFIG_X86_PAE
18915+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18916+#endif
18917+
18918 #if defined(CONFIG_HIGHPTE)
18919 #define pte_offset_map(dir, address) \
18920 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18921@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18922 /* Clear a kernel PTE and flush it from the TLB */
18923 #define kpte_clear_flush(ptep, vaddr) \
18924 do { \
18925+ pax_open_kernel(); \
18926 pte_clear(&init_mm, (vaddr), (ptep)); \
18927+ pax_close_kernel(); \
18928 __flush_tlb_one((vaddr)); \
18929 } while (0)
18930
18931 #endif /* !__ASSEMBLY__ */
18932
18933+#define HAVE_ARCH_UNMAPPED_AREA
18934+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18935+
18936 /*
18937 * kern_addr_valid() is (1) for FLATMEM and (0) for
18938 * SPARSEMEM and DISCONTIGMEM
18939diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18940index ed5903b..c7fe163 100644
18941--- a/arch/x86/include/asm/pgtable_32_types.h
18942+++ b/arch/x86/include/asm/pgtable_32_types.h
18943@@ -8,7 +8,7 @@
18944 */
18945 #ifdef CONFIG_X86_PAE
18946 # include <asm/pgtable-3level_types.h>
18947-# define PMD_SIZE (1UL << PMD_SHIFT)
18948+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18949 # define PMD_MASK (~(PMD_SIZE - 1))
18950 #else
18951 # include <asm/pgtable-2level_types.h>
18952@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18953 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18954 #endif
18955
18956+#ifdef CONFIG_PAX_KERNEXEC
18957+#ifndef __ASSEMBLY__
18958+extern unsigned char MODULES_EXEC_VADDR[];
18959+extern unsigned char MODULES_EXEC_END[];
18960+#endif
18961+#include <asm/boot.h>
18962+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18963+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18964+#else
18965+#define ktla_ktva(addr) (addr)
18966+#define ktva_ktla(addr) (addr)
18967+#endif
18968+
18969 #define MODULES_VADDR VMALLOC_START
18970 #define MODULES_END VMALLOC_END
18971 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18972diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18973index 3874693..d7906ac 100644
18974--- a/arch/x86/include/asm/pgtable_64.h
18975+++ b/arch/x86/include/asm/pgtable_64.h
18976@@ -16,11 +16,16 @@
18977
18978 extern pud_t level3_kernel_pgt[512];
18979 extern pud_t level3_ident_pgt[512];
18980+extern pud_t level3_vmalloc_start_pgt[512];
18981+extern pud_t level3_vmalloc_end_pgt[512];
18982+extern pud_t level3_vmemmap_pgt[512];
18983+extern pud_t level2_vmemmap_pgt[512];
18984 extern pmd_t level2_kernel_pgt[512];
18985 extern pmd_t level2_fixmap_pgt[512];
18986-extern pmd_t level2_ident_pgt[512];
18987+extern pmd_t level2_ident_pgt[512*2];
18988 extern pte_t level1_fixmap_pgt[512];
18989-extern pgd_t init_level4_pgt[];
18990+extern pte_t level1_vsyscall_pgt[512];
18991+extern pgd_t init_level4_pgt[512];
18992
18993 #define swapper_pg_dir init_level4_pgt
18994
18995@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18996
18997 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18998 {
18999+ pax_open_kernel();
19000 *pmdp = pmd;
19001+ pax_close_kernel();
19002 }
19003
19004 static inline void native_pmd_clear(pmd_t *pmd)
19005@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
19006
19007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
19008 {
19009+ pax_open_kernel();
19010 *pudp = pud;
19011+ pax_close_kernel();
19012 }
19013
19014 static inline void native_pud_clear(pud_t *pud)
19015@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
19016
19017 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
19018 {
19019+ pax_open_kernel();
19020+ *pgdp = pgd;
19021+ pax_close_kernel();
19022+}
19023+
19024+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
19025+{
19026 *pgdp = pgd;
19027 }
19028
19029diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
19030index 7166e25..baaa6fe 100644
19031--- a/arch/x86/include/asm/pgtable_64_types.h
19032+++ b/arch/x86/include/asm/pgtable_64_types.h
19033@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
19034 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
19035 #define MODULES_END _AC(0xffffffffff000000, UL)
19036 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
19037+#define MODULES_EXEC_VADDR MODULES_VADDR
19038+#define MODULES_EXEC_END MODULES_END
19039 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
19040 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
19041
19042+#define ktla_ktva(addr) (addr)
19043+#define ktva_ktla(addr) (addr)
19044+
19045 #define EARLY_DYNAMIC_PAGE_TABLES 64
19046
19047 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
19048diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
19049index f216963..6bd7c21 100644
19050--- a/arch/x86/include/asm/pgtable_types.h
19051+++ b/arch/x86/include/asm/pgtable_types.h
19052@@ -111,8 +111,10 @@
19053
19054 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19055 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
19056-#else
19057+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
19058 #define _PAGE_NX (_AT(pteval_t, 0))
19059+#else
19060+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
19061 #endif
19062
19063 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
19064@@ -151,6 +153,9 @@
19065 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
19066 _PAGE_ACCESSED)
19067
19068+#define PAGE_READONLY_NOEXEC PAGE_READONLY
19069+#define PAGE_SHARED_NOEXEC PAGE_SHARED
19070+
19071 #define __PAGE_KERNEL_EXEC \
19072 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
19073 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
19074@@ -161,7 +166,7 @@
19075 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
19076 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
19077 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
19078-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
19079+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
19080 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
19081 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
19082 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
19083@@ -218,7 +223,7 @@
19084 #ifdef CONFIG_X86_64
19085 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
19086 #else
19087-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
19088+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19089 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19090 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
19091 #endif
19092@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
19093 {
19094 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
19095 }
19096+#endif
19097
19098+#if PAGETABLE_LEVELS == 3
19099+#include <asm-generic/pgtable-nopud.h>
19100+#endif
19101+
19102+#if PAGETABLE_LEVELS == 2
19103+#include <asm-generic/pgtable-nopmd.h>
19104+#endif
19105+
19106+#ifndef __ASSEMBLY__
19107 #if PAGETABLE_LEVELS > 3
19108 typedef struct { pudval_t pud; } pud_t;
19109
19110@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
19111 return pud.pud;
19112 }
19113 #else
19114-#include <asm-generic/pgtable-nopud.h>
19115-
19116 static inline pudval_t native_pud_val(pud_t pud)
19117 {
19118 return native_pgd_val(pud.pgd);
19119@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
19120 return pmd.pmd;
19121 }
19122 #else
19123-#include <asm-generic/pgtable-nopmd.h>
19124-
19125 static inline pmdval_t native_pmd_val(pmd_t pmd)
19126 {
19127 return native_pgd_val(pmd.pud.pgd);
19128@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
19129
19130 extern pteval_t __supported_pte_mask;
19131 extern void set_nx(void);
19132-extern int nx_enabled;
19133
19134 #define pgprot_writecombine pgprot_writecombine
19135 extern pgprot_t pgprot_writecombine(pgprot_t prot);
19136diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
19137index 7024c12..71c46b9 100644
19138--- a/arch/x86/include/asm/preempt.h
19139+++ b/arch/x86/include/asm/preempt.h
19140@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
19141 */
19142 static __always_inline bool __preempt_count_dec_and_test(void)
19143 {
19144- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
19145+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
19146 }
19147
19148 /*
19149diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
19150index eb71ec7..f06532a 100644
19151--- a/arch/x86/include/asm/processor.h
19152+++ b/arch/x86/include/asm/processor.h
19153@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
19154 /* Index into per_cpu list: */
19155 u16 cpu_index;
19156 u32 microcode;
19157-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
19158+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
19159
19160 #define X86_VENDOR_INTEL 0
19161 #define X86_VENDOR_CYRIX 1
19162@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
19163 : "memory");
19164 }
19165
19166+/* invpcid (%rdx),%rax */
19167+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
19168+
19169+#define INVPCID_SINGLE_ADDRESS 0UL
19170+#define INVPCID_SINGLE_CONTEXT 1UL
19171+#define INVPCID_ALL_GLOBAL 2UL
19172+#define INVPCID_ALL_NONGLOBAL 3UL
19173+
19174+#define PCID_KERNEL 0UL
19175+#define PCID_USER 1UL
19176+#define PCID_NOFLUSH (1UL << 63)
19177+
19178 static inline void load_cr3(pgd_t *pgdir)
19179 {
19180- write_cr3(__pa(pgdir));
19181+ write_cr3(__pa(pgdir) | PCID_KERNEL);
19182 }
19183
19184 #ifdef CONFIG_X86_32
19185@@ -282,7 +294,7 @@ struct tss_struct {
19186
19187 } ____cacheline_aligned;
19188
19189-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
19190+extern struct tss_struct init_tss[NR_CPUS];
19191
19192 /*
19193 * Save the original ist values for checking stack pointers during debugging
19194@@ -478,6 +490,7 @@ struct thread_struct {
19195 unsigned short ds;
19196 unsigned short fsindex;
19197 unsigned short gsindex;
19198+ unsigned short ss;
19199 #endif
19200 #ifdef CONFIG_X86_32
19201 unsigned long ip;
19202@@ -587,29 +600,8 @@ static inline void load_sp0(struct tss_struct *tss,
19203 extern unsigned long mmu_cr4_features;
19204 extern u32 *trampoline_cr4_features;
19205
19206-static inline void set_in_cr4(unsigned long mask)
19207-{
19208- unsigned long cr4;
19209-
19210- mmu_cr4_features |= mask;
19211- if (trampoline_cr4_features)
19212- *trampoline_cr4_features = mmu_cr4_features;
19213- cr4 = read_cr4();
19214- cr4 |= mask;
19215- write_cr4(cr4);
19216-}
19217-
19218-static inline void clear_in_cr4(unsigned long mask)
19219-{
19220- unsigned long cr4;
19221-
19222- mmu_cr4_features &= ~mask;
19223- if (trampoline_cr4_features)
19224- *trampoline_cr4_features = mmu_cr4_features;
19225- cr4 = read_cr4();
19226- cr4 &= ~mask;
19227- write_cr4(cr4);
19228-}
19229+extern void set_in_cr4(unsigned long mask);
19230+extern void clear_in_cr4(unsigned long mask);
19231
19232 typedef struct {
19233 unsigned long seg;
19234@@ -837,11 +829,18 @@ static inline void spin_lock_prefetch(const void *x)
19235 */
19236 #define TASK_SIZE PAGE_OFFSET
19237 #define TASK_SIZE_MAX TASK_SIZE
19238+
19239+#ifdef CONFIG_PAX_SEGMEXEC
19240+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
19241+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
19242+#else
19243 #define STACK_TOP TASK_SIZE
19244-#define STACK_TOP_MAX STACK_TOP
19245+#endif
19246+
19247+#define STACK_TOP_MAX TASK_SIZE
19248
19249 #define INIT_THREAD { \
19250- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19251+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19252 .vm86_info = NULL, \
19253 .sysenter_cs = __KERNEL_CS, \
19254 .io_bitmap_ptr = NULL, \
19255@@ -855,7 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
19256 */
19257 #define INIT_TSS { \
19258 .x86_tss = { \
19259- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19260+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19261 .ss0 = __KERNEL_DS, \
19262 .ss1 = __KERNEL_CS, \
19263 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
19264@@ -866,11 +865,7 @@ static inline void spin_lock_prefetch(const void *x)
19265 extern unsigned long thread_saved_pc(struct task_struct *tsk);
19266
19267 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
19268-#define KSTK_TOP(info) \
19269-({ \
19270- unsigned long *__ptr = (unsigned long *)(info); \
19271- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
19272-})
19273+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
19274
19275 /*
19276 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
19277@@ -885,7 +880,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19278 #define task_pt_regs(task) \
19279 ({ \
19280 struct pt_regs *__regs__; \
19281- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
19282+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
19283 __regs__ - 1; \
19284 })
19285
19286@@ -895,13 +890,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19287 /*
19288 * User space process size. 47bits minus one guard page.
19289 */
19290-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
19291+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
19292
19293 /* This decides where the kernel will search for a free chunk of vm
19294 * space during mmap's.
19295 */
19296 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
19297- 0xc0000000 : 0xFFFFe000)
19298+ 0xc0000000 : 0xFFFFf000)
19299
19300 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
19301 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
19302@@ -912,11 +907,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19303 #define STACK_TOP_MAX TASK_SIZE_MAX
19304
19305 #define INIT_THREAD { \
19306- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19307+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19308 }
19309
19310 #define INIT_TSS { \
19311- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19312+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19313 }
19314
19315 /*
19316@@ -944,6 +939,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
19317 */
19318 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
19319
19320+#ifdef CONFIG_PAX_SEGMEXEC
19321+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
19322+#endif
19323+
19324 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
19325
19326 /* Get/set a process' ability to use the timestamp counter instruction */
19327@@ -970,7 +969,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
19328 return 0;
19329 }
19330
19331-extern unsigned long arch_align_stack(unsigned long sp);
19332+#define arch_align_stack(x) ((x) & ~0xfUL)
19333 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
19334
19335 void default_idle(void);
19336@@ -980,6 +979,6 @@ bool xen_set_default_idle(void);
19337 #define xen_set_default_idle 0
19338 #endif
19339
19340-void stop_this_cpu(void *dummy);
19341+void stop_this_cpu(void *dummy) __noreturn;
19342 void df_debug(struct pt_regs *regs, long error_code);
19343 #endif /* _ASM_X86_PROCESSOR_H */
19344diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
19345index 6205f0c..688a3a9 100644
19346--- a/arch/x86/include/asm/ptrace.h
19347+++ b/arch/x86/include/asm/ptrace.h
19348@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
19349 }
19350
19351 /*
19352- * user_mode_vm(regs) determines whether a register set came from user mode.
19353+ * user_mode(regs) determines whether a register set came from user mode.
19354 * This is true if V8086 mode was enabled OR if the register set was from
19355 * protected mode with RPL-3 CS value. This tricky test checks that with
19356 * one comparison. Many places in the kernel can bypass this full check
19357- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
19358+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
19359+ * be used.
19360 */
19361-static inline int user_mode(struct pt_regs *regs)
19362+static inline int user_mode_novm(struct pt_regs *regs)
19363 {
19364 #ifdef CONFIG_X86_32
19365 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
19366 #else
19367- return !!(regs->cs & 3);
19368+ return !!(regs->cs & SEGMENT_RPL_MASK);
19369 #endif
19370 }
19371
19372-static inline int user_mode_vm(struct pt_regs *regs)
19373+static inline int user_mode(struct pt_regs *regs)
19374 {
19375 #ifdef CONFIG_X86_32
19376 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
19377 USER_RPL;
19378 #else
19379- return user_mode(regs);
19380+ return user_mode_novm(regs);
19381 #endif
19382 }
19383
19384@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
19385 #ifdef CONFIG_X86_64
19386 static inline bool user_64bit_mode(struct pt_regs *regs)
19387 {
19388+ unsigned long cs = regs->cs & 0xffff;
19389 #ifndef CONFIG_PARAVIRT
19390 /*
19391 * On non-paravirt systems, this is the only long mode CPL 3
19392 * selector. We do not allow long mode selectors in the LDT.
19393 */
19394- return regs->cs == __USER_CS;
19395+ return cs == __USER_CS;
19396 #else
19397 /* Headers are too twisted for this to go in paravirt.h. */
19398- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
19399+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
19400 #endif
19401 }
19402
19403@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
19404 * Traps from the kernel do not save sp and ss.
19405 * Use the helper function to retrieve sp.
19406 */
19407- if (offset == offsetof(struct pt_regs, sp) &&
19408- regs->cs == __KERNEL_CS)
19409- return kernel_stack_pointer(regs);
19410+ if (offset == offsetof(struct pt_regs, sp)) {
19411+ unsigned long cs = regs->cs & 0xffff;
19412+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
19413+ return kernel_stack_pointer(regs);
19414+ }
19415 #endif
19416 return *(unsigned long *)((unsigned long)regs + offset);
19417 }
19418diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
19419index ae0e241..e80b10b 100644
19420--- a/arch/x86/include/asm/qrwlock.h
19421+++ b/arch/x86/include/asm/qrwlock.h
19422@@ -7,8 +7,8 @@
19423 #define queue_write_unlock queue_write_unlock
19424 static inline void queue_write_unlock(struct qrwlock *lock)
19425 {
19426- barrier();
19427- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
19428+ barrier();
19429+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
19430 }
19431 #endif
19432
19433diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
19434index 9c6b890..5305f53 100644
19435--- a/arch/x86/include/asm/realmode.h
19436+++ b/arch/x86/include/asm/realmode.h
19437@@ -22,16 +22,14 @@ struct real_mode_header {
19438 #endif
19439 /* APM/BIOS reboot */
19440 u32 machine_real_restart_asm;
19441-#ifdef CONFIG_X86_64
19442 u32 machine_real_restart_seg;
19443-#endif
19444 };
19445
19446 /* This must match data at trampoline_32/64.S */
19447 struct trampoline_header {
19448 #ifdef CONFIG_X86_32
19449 u32 start;
19450- u16 gdt_pad;
19451+ u16 boot_cs;
19452 u16 gdt_limit;
19453 u32 gdt_base;
19454 #else
19455diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
19456index a82c4f1..ac45053 100644
19457--- a/arch/x86/include/asm/reboot.h
19458+++ b/arch/x86/include/asm/reboot.h
19459@@ -6,13 +6,13 @@
19460 struct pt_regs;
19461
19462 struct machine_ops {
19463- void (*restart)(char *cmd);
19464- void (*halt)(void);
19465- void (*power_off)(void);
19466+ void (* __noreturn restart)(char *cmd);
19467+ void (* __noreturn halt)(void);
19468+ void (* __noreturn power_off)(void);
19469 void (*shutdown)(void);
19470 void (*crash_shutdown)(struct pt_regs *);
19471- void (*emergency_restart)(void);
19472-};
19473+ void (* __noreturn emergency_restart)(void);
19474+} __no_const;
19475
19476 extern struct machine_ops machine_ops;
19477
19478diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
19479index 8f7866a..e442f20 100644
19480--- a/arch/x86/include/asm/rmwcc.h
19481+++ b/arch/x86/include/asm/rmwcc.h
19482@@ -3,7 +3,34 @@
19483
19484 #ifdef CC_HAVE_ASM_GOTO
19485
19486-#define __GEN_RMWcc(fullop, var, cc, ...) \
19487+#ifdef CONFIG_PAX_REFCOUNT
19488+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19489+do { \
19490+ asm_volatile_goto (fullop \
19491+ ";jno 0f\n" \
19492+ fullantiop \
19493+ ";int $4\n0:\n" \
19494+ _ASM_EXTABLE(0b, 0b) \
19495+ ";j" cc " %l[cc_label]" \
19496+ : : "m" (var), ## __VA_ARGS__ \
19497+ : "memory" : cc_label); \
19498+ return 0; \
19499+cc_label: \
19500+ return 1; \
19501+} while (0)
19502+#else
19503+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19504+do { \
19505+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19506+ : : "m" (var), ## __VA_ARGS__ \
19507+ : "memory" : cc_label); \
19508+ return 0; \
19509+cc_label: \
19510+ return 1; \
19511+} while (0)
19512+#endif
19513+
19514+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19515 do { \
19516 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19517 : : "m" (var), ## __VA_ARGS__ \
19518@@ -13,15 +40,46 @@ cc_label: \
19519 return 1; \
19520 } while (0)
19521
19522-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19523- __GEN_RMWcc(op " " arg0, var, cc)
19524+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19525+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19526
19527-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19528- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19529+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19530+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19531+
19532+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19533+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19534+
19535+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19536+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19537
19538 #else /* !CC_HAVE_ASM_GOTO */
19539
19540-#define __GEN_RMWcc(fullop, var, cc, ...) \
19541+#ifdef CONFIG_PAX_REFCOUNT
19542+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19543+do { \
19544+ char c; \
19545+ asm volatile (fullop \
19546+ ";jno 0f\n" \
19547+ fullantiop \
19548+ ";int $4\n0:\n" \
19549+ _ASM_EXTABLE(0b, 0b) \
19550+ "; set" cc " %1" \
19551+ : "+m" (var), "=qm" (c) \
19552+ : __VA_ARGS__ : "memory"); \
19553+ return c != 0; \
19554+} while (0)
19555+#else
19556+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19557+do { \
19558+ char c; \
19559+ asm volatile (fullop "; set" cc " %1" \
19560+ : "+m" (var), "=qm" (c) \
19561+ : __VA_ARGS__ : "memory"); \
19562+ return c != 0; \
19563+} while (0)
19564+#endif
19565+
19566+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19567 do { \
19568 char c; \
19569 asm volatile (fullop "; set" cc " %1" \
19570@@ -30,11 +88,17 @@ do { \
19571 return c != 0; \
19572 } while (0)
19573
19574-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19575- __GEN_RMWcc(op " " arg0, var, cc)
19576+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19577+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19578+
19579+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19580+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19581+
19582+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19583+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19584
19585-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19586- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19587+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19588+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19589
19590 #endif /* CC_HAVE_ASM_GOTO */
19591
19592diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19593index cad82c9..2e5c5c1 100644
19594--- a/arch/x86/include/asm/rwsem.h
19595+++ b/arch/x86/include/asm/rwsem.h
19596@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19597 {
19598 asm volatile("# beginning down_read\n\t"
19599 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19600+
19601+#ifdef CONFIG_PAX_REFCOUNT
19602+ "jno 0f\n"
19603+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19604+ "int $4\n0:\n"
19605+ _ASM_EXTABLE(0b, 0b)
19606+#endif
19607+
19608 /* adds 0x00000001 */
19609 " jns 1f\n"
19610 " call call_rwsem_down_read_failed\n"
19611@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19612 "1:\n\t"
19613 " mov %1,%2\n\t"
19614 " add %3,%2\n\t"
19615+
19616+#ifdef CONFIG_PAX_REFCOUNT
19617+ "jno 0f\n"
19618+ "sub %3,%2\n"
19619+ "int $4\n0:\n"
19620+ _ASM_EXTABLE(0b, 0b)
19621+#endif
19622+
19623 " jle 2f\n\t"
19624 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19625 " jnz 1b\n\t"
19626@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19627 long tmp;
19628 asm volatile("# beginning down_write\n\t"
19629 LOCK_PREFIX " xadd %1,(%2)\n\t"
19630+
19631+#ifdef CONFIG_PAX_REFCOUNT
19632+ "jno 0f\n"
19633+ "mov %1,(%2)\n"
19634+ "int $4\n0:\n"
19635+ _ASM_EXTABLE(0b, 0b)
19636+#endif
19637+
19638 /* adds 0xffff0001, returns the old value */
19639 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19640 /* was the active mask 0 before? */
19641@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19642 long tmp;
19643 asm volatile("# beginning __up_read\n\t"
19644 LOCK_PREFIX " xadd %1,(%2)\n\t"
19645+
19646+#ifdef CONFIG_PAX_REFCOUNT
19647+ "jno 0f\n"
19648+ "mov %1,(%2)\n"
19649+ "int $4\n0:\n"
19650+ _ASM_EXTABLE(0b, 0b)
19651+#endif
19652+
19653 /* subtracts 1, returns the old value */
19654 " jns 1f\n\t"
19655 " call call_rwsem_wake\n" /* expects old value in %edx */
19656@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19657 long tmp;
19658 asm volatile("# beginning __up_write\n\t"
19659 LOCK_PREFIX " xadd %1,(%2)\n\t"
19660+
19661+#ifdef CONFIG_PAX_REFCOUNT
19662+ "jno 0f\n"
19663+ "mov %1,(%2)\n"
19664+ "int $4\n0:\n"
19665+ _ASM_EXTABLE(0b, 0b)
19666+#endif
19667+
19668 /* subtracts 0xffff0001, returns the old value */
19669 " jns 1f\n\t"
19670 " call call_rwsem_wake\n" /* expects old value in %edx */
19671@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19672 {
19673 asm volatile("# beginning __downgrade_write\n\t"
19674 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19675+
19676+#ifdef CONFIG_PAX_REFCOUNT
19677+ "jno 0f\n"
19678+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19679+ "int $4\n0:\n"
19680+ _ASM_EXTABLE(0b, 0b)
19681+#endif
19682+
19683 /*
19684 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19685 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19686@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19687 */
19688 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19689 {
19690- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19691+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19692+
19693+#ifdef CONFIG_PAX_REFCOUNT
19694+ "jno 0f\n"
19695+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19696+ "int $4\n0:\n"
19697+ _ASM_EXTABLE(0b, 0b)
19698+#endif
19699+
19700 : "+m" (sem->count)
19701 : "er" (delta));
19702 }
19703@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19704 */
19705 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19706 {
19707- return delta + xadd(&sem->count, delta);
19708+ return delta + xadd_check_overflow(&sem->count, delta);
19709 }
19710
19711 #endif /* __KERNEL__ */
19712diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19713index 6f1c3a8..7744f19 100644
19714--- a/arch/x86/include/asm/segment.h
19715+++ b/arch/x86/include/asm/segment.h
19716@@ -64,10 +64,15 @@
19717 * 26 - ESPFIX small SS
19718 * 27 - per-cpu [ offset to per-cpu data area ]
19719 * 28 - stack_canary-20 [ for stack protector ]
19720- * 29 - unused
19721- * 30 - unused
19722+ * 29 - PCI BIOS CS
19723+ * 30 - PCI BIOS DS
19724 * 31 - TSS for double fault handler
19725 */
19726+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19727+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19728+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19729+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19730+
19731 #define GDT_ENTRY_TLS_MIN 6
19732 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19733
19734@@ -79,6 +84,8 @@
19735
19736 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19737
19738+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19739+
19740 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19741
19742 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19743@@ -104,6 +111,12 @@
19744 #define __KERNEL_STACK_CANARY 0
19745 #endif
19746
19747+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19748+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19749+
19750+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19751+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19752+
19753 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19754
19755 /*
19756@@ -141,7 +154,7 @@
19757 */
19758
19759 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19760-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19761+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19762
19763
19764 #else
19765@@ -165,6 +178,8 @@
19766 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19767 #define __USER32_DS __USER_DS
19768
19769+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19770+
19771 #define GDT_ENTRY_TSS 8 /* needs two entries */
19772 #define GDT_ENTRY_LDT 10 /* needs two entries */
19773 #define GDT_ENTRY_TLS_MIN 12
19774@@ -173,6 +188,8 @@
19775 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19776 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19777
19778+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19779+
19780 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19781 #define FS_TLS 0
19782 #define GS_TLS 1
19783@@ -180,12 +197,14 @@
19784 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19785 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19786
19787-#define GDT_ENTRIES 16
19788+#define GDT_ENTRIES 17
19789
19790 #endif
19791
19792 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19793+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19794 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19795+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19796 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19797 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19798 #ifndef CONFIG_PARAVIRT
19799@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19800 {
19801 unsigned long __limit;
19802 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19803- return __limit + 1;
19804+ return __limit;
19805 }
19806
19807 #endif /* !__ASSEMBLY__ */
19808diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19809index 8d3120f..352b440 100644
19810--- a/arch/x86/include/asm/smap.h
19811+++ b/arch/x86/include/asm/smap.h
19812@@ -25,11 +25,40 @@
19813
19814 #include <asm/alternative-asm.h>
19815
19816+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19817+#define ASM_PAX_OPEN_USERLAND \
19818+ 661: jmp 663f; \
19819+ .pushsection .altinstr_replacement, "a" ; \
19820+ 662: pushq %rax; nop; \
19821+ .popsection ; \
19822+ .pushsection .altinstructions, "a" ; \
19823+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19824+ .popsection ; \
19825+ call __pax_open_userland; \
19826+ popq %rax; \
19827+ 663:
19828+
19829+#define ASM_PAX_CLOSE_USERLAND \
19830+ 661: jmp 663f; \
19831+ .pushsection .altinstr_replacement, "a" ; \
19832+ 662: pushq %rax; nop; \
19833+ .popsection; \
19834+ .pushsection .altinstructions, "a" ; \
19835+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19836+ .popsection; \
19837+ call __pax_close_userland; \
19838+ popq %rax; \
19839+ 663:
19840+#else
19841+#define ASM_PAX_OPEN_USERLAND
19842+#define ASM_PAX_CLOSE_USERLAND
19843+#endif
19844+
19845 #ifdef CONFIG_X86_SMAP
19846
19847 #define ASM_CLAC \
19848 661: ASM_NOP3 ; \
19849- .pushsection .altinstr_replacement, "ax" ; \
19850+ .pushsection .altinstr_replacement, "a" ; \
19851 662: __ASM_CLAC ; \
19852 .popsection ; \
19853 .pushsection .altinstructions, "a" ; \
19854@@ -38,7 +67,7 @@
19855
19856 #define ASM_STAC \
19857 661: ASM_NOP3 ; \
19858- .pushsection .altinstr_replacement, "ax" ; \
19859+ .pushsection .altinstr_replacement, "a" ; \
19860 662: __ASM_STAC ; \
19861 .popsection ; \
19862 .pushsection .altinstructions, "a" ; \
19863@@ -56,6 +85,37 @@
19864
19865 #include <asm/alternative.h>
19866
19867+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19868+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19869+
19870+extern void __pax_open_userland(void);
19871+static __always_inline unsigned long pax_open_userland(void)
19872+{
19873+
19874+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19875+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19876+ :
19877+ : [open] "i" (__pax_open_userland)
19878+ : "memory", "rax");
19879+#endif
19880+
19881+ return 0;
19882+}
19883+
19884+extern void __pax_close_userland(void);
19885+static __always_inline unsigned long pax_close_userland(void)
19886+{
19887+
19888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19889+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19890+ :
19891+ : [close] "i" (__pax_close_userland)
19892+ : "memory", "rax");
19893+#endif
19894+
19895+ return 0;
19896+}
19897+
19898 #ifdef CONFIG_X86_SMAP
19899
19900 static __always_inline void clac(void)
19901diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19902index 8cd27e0..7f05ec8 100644
19903--- a/arch/x86/include/asm/smp.h
19904+++ b/arch/x86/include/asm/smp.h
19905@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19906 /* cpus sharing the last level cache: */
19907 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19908 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19909-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19910+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19911
19912 static inline struct cpumask *cpu_sibling_mask(int cpu)
19913 {
19914@@ -78,7 +78,7 @@ struct smp_ops {
19915
19916 void (*send_call_func_ipi)(const struct cpumask *mask);
19917 void (*send_call_func_single_ipi)(int cpu);
19918-};
19919+} __no_const;
19920
19921 /* Globals due to paravirt */
19922 extern void set_cpu_sibling_map(int cpu);
19923@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19924 extern int safe_smp_processor_id(void);
19925
19926 #elif defined(CONFIG_X86_64_SMP)
19927-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19928-
19929-#define stack_smp_processor_id() \
19930-({ \
19931- struct thread_info *ti; \
19932- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19933- ti->cpu; \
19934-})
19935+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19936+#define stack_smp_processor_id() raw_smp_processor_id()
19937 #define safe_smp_processor_id() smp_processor_id()
19938
19939 #endif
19940diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19941index 54f1c80..39362a5 100644
19942--- a/arch/x86/include/asm/spinlock.h
19943+++ b/arch/x86/include/asm/spinlock.h
19944@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19945 static inline void arch_read_lock(arch_rwlock_t *rw)
19946 {
19947 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19948+
19949+#ifdef CONFIG_PAX_REFCOUNT
19950+ "jno 0f\n"
19951+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19952+ "int $4\n0:\n"
19953+ _ASM_EXTABLE(0b, 0b)
19954+#endif
19955+
19956 "jns 1f\n"
19957 "call __read_lock_failed\n\t"
19958 "1:\n"
19959@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19960 static inline void arch_write_lock(arch_rwlock_t *rw)
19961 {
19962 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19963+
19964+#ifdef CONFIG_PAX_REFCOUNT
19965+ "jno 0f\n"
19966+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19967+ "int $4\n0:\n"
19968+ _ASM_EXTABLE(0b, 0b)
19969+#endif
19970+
19971 "jz 1f\n"
19972 "call __write_lock_failed\n\t"
19973 "1:\n"
19974@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19975
19976 static inline void arch_read_unlock(arch_rwlock_t *rw)
19977 {
19978- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19979+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19980+
19981+#ifdef CONFIG_PAX_REFCOUNT
19982+ "jno 0f\n"
19983+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19984+ "int $4\n0:\n"
19985+ _ASM_EXTABLE(0b, 0b)
19986+#endif
19987+
19988 :"+m" (rw->lock) : : "memory");
19989 }
19990
19991 static inline void arch_write_unlock(arch_rwlock_t *rw)
19992 {
19993- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19994+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19995+
19996+#ifdef CONFIG_PAX_REFCOUNT
19997+ "jno 0f\n"
19998+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19999+ "int $4\n0:\n"
20000+ _ASM_EXTABLE(0b, 0b)
20001+#endif
20002+
20003 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
20004 }
20005 #else
20006diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
20007index 6a99859..03cb807 100644
20008--- a/arch/x86/include/asm/stackprotector.h
20009+++ b/arch/x86/include/asm/stackprotector.h
20010@@ -47,7 +47,7 @@
20011 * head_32 for boot CPU and setup_per_cpu_areas() for others.
20012 */
20013 #define GDT_STACK_CANARY_INIT \
20014- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
20015+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
20016
20017 /*
20018 * Initialize the stackprotector canary value.
20019@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
20020
20021 static inline void load_stack_canary_segment(void)
20022 {
20023-#ifdef CONFIG_X86_32
20024+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
20025 asm volatile ("mov %0, %%gs" : : "r" (0));
20026 #endif
20027 }
20028diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
20029index 70bbe39..4ae2bd4 100644
20030--- a/arch/x86/include/asm/stacktrace.h
20031+++ b/arch/x86/include/asm/stacktrace.h
20032@@ -11,28 +11,20 @@
20033
20034 extern int kstack_depth_to_print;
20035
20036-struct thread_info;
20037+struct task_struct;
20038 struct stacktrace_ops;
20039
20040-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
20041- unsigned long *stack,
20042- unsigned long bp,
20043- const struct stacktrace_ops *ops,
20044- void *data,
20045- unsigned long *end,
20046- int *graph);
20047+typedef unsigned long walk_stack_t(struct task_struct *task,
20048+ void *stack_start,
20049+ unsigned long *stack,
20050+ unsigned long bp,
20051+ const struct stacktrace_ops *ops,
20052+ void *data,
20053+ unsigned long *end,
20054+ int *graph);
20055
20056-extern unsigned long
20057-print_context_stack(struct thread_info *tinfo,
20058- unsigned long *stack, unsigned long bp,
20059- const struct stacktrace_ops *ops, void *data,
20060- unsigned long *end, int *graph);
20061-
20062-extern unsigned long
20063-print_context_stack_bp(struct thread_info *tinfo,
20064- unsigned long *stack, unsigned long bp,
20065- const struct stacktrace_ops *ops, void *data,
20066- unsigned long *end, int *graph);
20067+extern walk_stack_t print_context_stack;
20068+extern walk_stack_t print_context_stack_bp;
20069
20070 /* Generic stack tracer with callbacks */
20071
20072@@ -40,7 +32,7 @@ struct stacktrace_ops {
20073 void (*address)(void *data, unsigned long address, int reliable);
20074 /* On negative return stop dumping */
20075 int (*stack)(void *data, char *name);
20076- walk_stack_t walk_stack;
20077+ walk_stack_t *walk_stack;
20078 };
20079
20080 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
20081diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
20082index d7f3b3b..3cc39f1 100644
20083--- a/arch/x86/include/asm/switch_to.h
20084+++ b/arch/x86/include/asm/switch_to.h
20085@@ -108,7 +108,7 @@ do { \
20086 "call __switch_to\n\t" \
20087 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
20088 __switch_canary \
20089- "movq %P[thread_info](%%rsi),%%r8\n\t" \
20090+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
20091 "movq %%rax,%%rdi\n\t" \
20092 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
20093 "jnz ret_from_fork\n\t" \
20094@@ -119,7 +119,7 @@ do { \
20095 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
20096 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
20097 [_tif_fork] "i" (_TIF_FORK), \
20098- [thread_info] "i" (offsetof(struct task_struct, stack)), \
20099+ [thread_info] "m" (current_tinfo), \
20100 [current_task] "m" (current_task) \
20101 __switch_canary_iparam \
20102 : "memory", "cc" __EXTRA_CLOBBER)
20103diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
20104index 8540538..4b0b5e9 100644
20105--- a/arch/x86/include/asm/thread_info.h
20106+++ b/arch/x86/include/asm/thread_info.h
20107@@ -24,7 +24,6 @@ struct exec_domain;
20108 #include <linux/atomic.h>
20109
20110 struct thread_info {
20111- struct task_struct *task; /* main task structure */
20112 struct exec_domain *exec_domain; /* execution domain */
20113 __u32 flags; /* low level flags */
20114 __u32 status; /* thread synchronous flags */
20115@@ -33,13 +32,13 @@ struct thread_info {
20116 mm_segment_t addr_limit;
20117 struct restart_block restart_block;
20118 void __user *sysenter_return;
20119+ unsigned long lowest_stack;
20120 unsigned int sig_on_uaccess_error:1;
20121 unsigned int uaccess_err:1; /* uaccess failed */
20122 };
20123
20124-#define INIT_THREAD_INFO(tsk) \
20125+#define INIT_THREAD_INFO \
20126 { \
20127- .task = &tsk, \
20128 .exec_domain = &default_exec_domain, \
20129 .flags = 0, \
20130 .cpu = 0, \
20131@@ -50,7 +49,7 @@ struct thread_info {
20132 }, \
20133 }
20134
20135-#define init_thread_info (init_thread_union.thread_info)
20136+#define init_thread_info (init_thread_union.stack)
20137 #define init_stack (init_thread_union.stack)
20138
20139 #else /* !__ASSEMBLY__ */
20140@@ -91,6 +90,7 @@ struct thread_info {
20141 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
20142 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
20143 #define TIF_X32 30 /* 32-bit native x86-64 binary */
20144+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
20145
20146 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
20147 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
20148@@ -115,17 +115,18 @@ struct thread_info {
20149 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
20150 #define _TIF_ADDR32 (1 << TIF_ADDR32)
20151 #define _TIF_X32 (1 << TIF_X32)
20152+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
20153
20154 /* work to do in syscall_trace_enter() */
20155 #define _TIF_WORK_SYSCALL_ENTRY \
20156 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
20157 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
20158- _TIF_NOHZ)
20159+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20160
20161 /* work to do in syscall_trace_leave() */
20162 #define _TIF_WORK_SYSCALL_EXIT \
20163 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
20164- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
20165+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
20166
20167 /* work to do on interrupt/exception return */
20168 #define _TIF_WORK_MASK \
20169@@ -136,7 +137,7 @@ struct thread_info {
20170 /* work to do on any return to user space */
20171 #define _TIF_ALLWORK_MASK \
20172 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
20173- _TIF_NOHZ)
20174+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20175
20176 /* Only used for 64 bit */
20177 #define _TIF_DO_NOTIFY_MASK \
20178@@ -151,7 +152,6 @@ struct thread_info {
20179 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
20180
20181 #define STACK_WARN (THREAD_SIZE/8)
20182-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
20183
20184 /*
20185 * macros/functions for gaining access to the thread information structure
20186@@ -162,26 +162,18 @@ struct thread_info {
20187
20188 DECLARE_PER_CPU(unsigned long, kernel_stack);
20189
20190+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
20191+
20192 static inline struct thread_info *current_thread_info(void)
20193 {
20194- struct thread_info *ti;
20195- ti = (void *)(this_cpu_read_stable(kernel_stack) +
20196- KERNEL_STACK_OFFSET - THREAD_SIZE);
20197- return ti;
20198+ return this_cpu_read_stable(current_tinfo);
20199 }
20200
20201 #else /* !__ASSEMBLY__ */
20202
20203 /* how to get the thread information struct from ASM */
20204 #define GET_THREAD_INFO(reg) \
20205- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
20206- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
20207-
20208-/*
20209- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
20210- * a certain register (to be used in assembler memory operands).
20211- */
20212-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
20213+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
20214
20215 #endif
20216
20217@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
20218 extern void arch_task_cache_init(void);
20219 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
20220 extern void arch_release_task_struct(struct task_struct *tsk);
20221+
20222+#define __HAVE_THREAD_FUNCTIONS
20223+#define task_thread_info(task) (&(task)->tinfo)
20224+#define task_stack_page(task) ((task)->stack)
20225+#define setup_thread_stack(p, org) do {} while (0)
20226+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
20227+
20228 #endif
20229 #endif /* _ASM_X86_THREAD_INFO_H */
20230diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
20231index 04905bf..1178cdf 100644
20232--- a/arch/x86/include/asm/tlbflush.h
20233+++ b/arch/x86/include/asm/tlbflush.h
20234@@ -17,18 +17,44 @@
20235
20236 static inline void __native_flush_tlb(void)
20237 {
20238+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20239+ u64 descriptor[2];
20240+
20241+ descriptor[0] = PCID_KERNEL;
20242+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
20243+ return;
20244+ }
20245+
20246+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20247+ if (static_cpu_has(X86_FEATURE_PCID)) {
20248+ unsigned int cpu = raw_get_cpu();
20249+
20250+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
20251+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
20252+ raw_put_cpu_no_resched();
20253+ return;
20254+ }
20255+#endif
20256+
20257 native_write_cr3(native_read_cr3());
20258 }
20259
20260 static inline void __native_flush_tlb_global_irq_disabled(void)
20261 {
20262- unsigned long cr4;
20263+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20264+ u64 descriptor[2];
20265
20266- cr4 = native_read_cr4();
20267- /* clear PGE */
20268- native_write_cr4(cr4 & ~X86_CR4_PGE);
20269- /* write old PGE again and flush TLBs */
20270- native_write_cr4(cr4);
20271+ descriptor[0] = PCID_KERNEL;
20272+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
20273+ } else {
20274+ unsigned long cr4;
20275+
20276+ cr4 = native_read_cr4();
20277+ /* clear PGE */
20278+ native_write_cr4(cr4 & ~X86_CR4_PGE);
20279+ /* write old PGE again and flush TLBs */
20280+ native_write_cr4(cr4);
20281+ }
20282 }
20283
20284 static inline void __native_flush_tlb_global(void)
20285@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
20286
20287 static inline void __native_flush_tlb_single(unsigned long addr)
20288 {
20289+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20290+ u64 descriptor[2];
20291+
20292+ descriptor[0] = PCID_KERNEL;
20293+ descriptor[1] = addr;
20294+
20295+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20296+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
20297+ if (addr < TASK_SIZE_MAX)
20298+ descriptor[1] += pax_user_shadow_base;
20299+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20300+ }
20301+
20302+ descriptor[0] = PCID_USER;
20303+ descriptor[1] = addr;
20304+#endif
20305+
20306+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20307+ return;
20308+ }
20309+
20310+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20311+ if (static_cpu_has(X86_FEATURE_PCID)) {
20312+ unsigned int cpu = raw_get_cpu();
20313+
20314+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
20315+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20316+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
20317+ raw_put_cpu_no_resched();
20318+
20319+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
20320+ addr += pax_user_shadow_base;
20321+ }
20322+#endif
20323+
20324 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20325 }
20326
20327diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
20328index 0d592e0..526f797 100644
20329--- a/arch/x86/include/asm/uaccess.h
20330+++ b/arch/x86/include/asm/uaccess.h
20331@@ -7,6 +7,7 @@
20332 #include <linux/compiler.h>
20333 #include <linux/thread_info.h>
20334 #include <linux/string.h>
20335+#include <linux/spinlock.h>
20336 #include <asm/asm.h>
20337 #include <asm/page.h>
20338 #include <asm/smap.h>
20339@@ -29,7 +30,12 @@
20340
20341 #define get_ds() (KERNEL_DS)
20342 #define get_fs() (current_thread_info()->addr_limit)
20343+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20344+void __set_fs(mm_segment_t x);
20345+void set_fs(mm_segment_t x);
20346+#else
20347 #define set_fs(x) (current_thread_info()->addr_limit = (x))
20348+#endif
20349
20350 #define segment_eq(a, b) ((a).seg == (b).seg)
20351
20352@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
20353 * checks that the pointer is in the user space range - after calling
20354 * this function, memory access functions may still return -EFAULT.
20355 */
20356-#define access_ok(type, addr, size) \
20357- likely(!__range_not_ok(addr, size, user_addr_max()))
20358+extern int _cond_resched(void);
20359+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
20360+#define access_ok(type, addr, size) \
20361+({ \
20362+ unsigned long __size = size; \
20363+ unsigned long __addr = (unsigned long)addr; \
20364+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
20365+ if (__ret_ao && __size) { \
20366+ unsigned long __addr_ao = __addr & PAGE_MASK; \
20367+ unsigned long __end_ao = __addr + __size - 1; \
20368+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
20369+ while (__addr_ao <= __end_ao) { \
20370+ char __c_ao; \
20371+ __addr_ao += PAGE_SIZE; \
20372+ if (__size > PAGE_SIZE) \
20373+ _cond_resched(); \
20374+ if (__get_user(__c_ao, (char __user *)__addr)) \
20375+ break; \
20376+ if (type != VERIFY_WRITE) { \
20377+ __addr = __addr_ao; \
20378+ continue; \
20379+ } \
20380+ if (__put_user(__c_ao, (char __user *)__addr)) \
20381+ break; \
20382+ __addr = __addr_ao; \
20383+ } \
20384+ } \
20385+ } \
20386+ __ret_ao; \
20387+})
20388
20389 /*
20390 * The exception table consists of pairs of addresses relative to the
20391@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20392 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
20393 __chk_user_ptr(ptr); \
20394 might_fault(); \
20395+ pax_open_userland(); \
20396 asm volatile("call __get_user_%P3" \
20397 : "=a" (__ret_gu), "=r" (__val_gu) \
20398 : "0" (ptr), "i" (sizeof(*(ptr)))); \
20399 (x) = (__typeof__(*(ptr))) __val_gu; \
20400+ pax_close_userland(); \
20401 __ret_gu; \
20402 })
20403
20404@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20405 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
20406 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
20407
20408-
20409+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20410+#define __copyuser_seg "gs;"
20411+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
20412+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
20413+#else
20414+#define __copyuser_seg
20415+#define __COPYUSER_SET_ES
20416+#define __COPYUSER_RESTORE_ES
20417+#endif
20418
20419 #ifdef CONFIG_X86_32
20420 #define __put_user_asm_u64(x, addr, err, errret) \
20421 asm volatile(ASM_STAC "\n" \
20422- "1: movl %%eax,0(%2)\n" \
20423- "2: movl %%edx,4(%2)\n" \
20424+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
20425+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
20426 "3: " ASM_CLAC "\n" \
20427 ".section .fixup,\"ax\"\n" \
20428 "4: movl %3,%0\n" \
20429@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20430
20431 #define __put_user_asm_ex_u64(x, addr) \
20432 asm volatile(ASM_STAC "\n" \
20433- "1: movl %%eax,0(%1)\n" \
20434- "2: movl %%edx,4(%1)\n" \
20435+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
20436+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
20437 "3: " ASM_CLAC "\n" \
20438 _ASM_EXTABLE_EX(1b, 2b) \
20439 _ASM_EXTABLE_EX(2b, 3b) \
20440@@ -257,7 +301,8 @@ extern void __put_user_8(void);
20441 __typeof__(*(ptr)) __pu_val; \
20442 __chk_user_ptr(ptr); \
20443 might_fault(); \
20444- __pu_val = x; \
20445+ __pu_val = (x); \
20446+ pax_open_userland(); \
20447 switch (sizeof(*(ptr))) { \
20448 case 1: \
20449 __put_user_x(1, __pu_val, ptr, __ret_pu); \
20450@@ -275,6 +320,7 @@ extern void __put_user_8(void);
20451 __put_user_x(X, __pu_val, ptr, __ret_pu); \
20452 break; \
20453 } \
20454+ pax_close_userland(); \
20455 __ret_pu; \
20456 })
20457
20458@@ -355,8 +401,10 @@ do { \
20459 } while (0)
20460
20461 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20462+do { \
20463+ pax_open_userland(); \
20464 asm volatile(ASM_STAC "\n" \
20465- "1: mov"itype" %2,%"rtype"1\n" \
20466+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
20467 "2: " ASM_CLAC "\n" \
20468 ".section .fixup,\"ax\"\n" \
20469 "3: mov %3,%0\n" \
20470@@ -364,8 +412,10 @@ do { \
20471 " jmp 2b\n" \
20472 ".previous\n" \
20473 _ASM_EXTABLE(1b, 3b) \
20474- : "=r" (err), ltype(x) \
20475- : "m" (__m(addr)), "i" (errret), "0" (err))
20476+ : "=r" (err), ltype (x) \
20477+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
20478+ pax_close_userland(); \
20479+} while (0)
20480
20481 #define __get_user_size_ex(x, ptr, size) \
20482 do { \
20483@@ -389,7 +439,7 @@ do { \
20484 } while (0)
20485
20486 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
20487- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
20488+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
20489 "2:\n" \
20490 _ASM_EXTABLE_EX(1b, 2b) \
20491 : ltype(x) : "m" (__m(addr)))
20492@@ -406,13 +456,24 @@ do { \
20493 int __gu_err; \
20494 unsigned long __gu_val; \
20495 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
20496- (x) = (__force __typeof__(*(ptr)))__gu_val; \
20497+ (x) = (__typeof__(*(ptr)))__gu_val; \
20498 __gu_err; \
20499 })
20500
20501 /* FIXME: this hack is definitely wrong -AK */
20502 struct __large_struct { unsigned long buf[100]; };
20503-#define __m(x) (*(struct __large_struct __user *)(x))
20504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20505+#define ____m(x) \
20506+({ \
20507+ unsigned long ____x = (unsigned long)(x); \
20508+ if (____x < pax_user_shadow_base) \
20509+ ____x += pax_user_shadow_base; \
20510+ (typeof(x))____x; \
20511+})
20512+#else
20513+#define ____m(x) (x)
20514+#endif
20515+#define __m(x) (*(struct __large_struct __user *)____m(x))
20516
20517 /*
20518 * Tell gcc we read from memory instead of writing: this is because
20519@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20520 * aliasing issues.
20521 */
20522 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20523+do { \
20524+ pax_open_userland(); \
20525 asm volatile(ASM_STAC "\n" \
20526- "1: mov"itype" %"rtype"1,%2\n" \
20527+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20528 "2: " ASM_CLAC "\n" \
20529 ".section .fixup,\"ax\"\n" \
20530 "3: mov %3,%0\n" \
20531@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20532 ".previous\n" \
20533 _ASM_EXTABLE(1b, 3b) \
20534 : "=r"(err) \
20535- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20536+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20537+ pax_close_userland(); \
20538+} while (0)
20539
20540 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20541- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20542+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20543 "2:\n" \
20544 _ASM_EXTABLE_EX(1b, 2b) \
20545 : : ltype(x), "m" (__m(addr)))
20546@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20547 */
20548 #define uaccess_try do { \
20549 current_thread_info()->uaccess_err = 0; \
20550+ pax_open_userland(); \
20551 stac(); \
20552 barrier();
20553
20554 #define uaccess_catch(err) \
20555 clac(); \
20556+ pax_close_userland(); \
20557 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20558 } while (0)
20559
20560@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20561 * On error, the variable @x is set to zero.
20562 */
20563
20564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20565+#define __get_user(x, ptr) get_user((x), (ptr))
20566+#else
20567 #define __get_user(x, ptr) \
20568 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20569+#endif
20570
20571 /**
20572 * __put_user: - Write a simple value into user space, with less checking.
20573@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20574 * Returns zero on success, or -EFAULT on error.
20575 */
20576
20577+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20578+#define __put_user(x, ptr) put_user((x), (ptr))
20579+#else
20580 #define __put_user(x, ptr) \
20581 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20582+#endif
20583
20584 #define __get_user_unaligned __get_user
20585 #define __put_user_unaligned __put_user
20586@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20587 #define get_user_ex(x, ptr) do { \
20588 unsigned long __gue_val; \
20589 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20590- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20591+ (x) = (__typeof__(*(ptr)))__gue_val; \
20592 } while (0)
20593
20594 #define put_user_try uaccess_try
20595@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20596 __typeof__(ptr) __uval = (uval); \
20597 __typeof__(*(ptr)) __old = (old); \
20598 __typeof__(*(ptr)) __new = (new); \
20599+ pax_open_userland(); \
20600 switch (size) { \
20601 case 1: \
20602 { \
20603 asm volatile("\t" ASM_STAC "\n" \
20604- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20605+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20606 "2:\t" ASM_CLAC "\n" \
20607 "\t.section .fixup, \"ax\"\n" \
20608 "3:\tmov %3, %0\n" \
20609 "\tjmp 2b\n" \
20610 "\t.previous\n" \
20611 _ASM_EXTABLE(1b, 3b) \
20612- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20613+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20614 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20615 : "memory" \
20616 ); \
20617@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20618 case 2: \
20619 { \
20620 asm volatile("\t" ASM_STAC "\n" \
20621- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20622+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20623 "2:\t" ASM_CLAC "\n" \
20624 "\t.section .fixup, \"ax\"\n" \
20625 "3:\tmov %3, %0\n" \
20626 "\tjmp 2b\n" \
20627 "\t.previous\n" \
20628 _ASM_EXTABLE(1b, 3b) \
20629- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20630+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20631 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20632 : "memory" \
20633 ); \
20634@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20635 case 4: \
20636 { \
20637 asm volatile("\t" ASM_STAC "\n" \
20638- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20639+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20640 "2:\t" ASM_CLAC "\n" \
20641 "\t.section .fixup, \"ax\"\n" \
20642 "3:\tmov %3, %0\n" \
20643 "\tjmp 2b\n" \
20644 "\t.previous\n" \
20645 _ASM_EXTABLE(1b, 3b) \
20646- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20647+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20648 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20649 : "memory" \
20650 ); \
20651@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20652 __cmpxchg_wrong_size(); \
20653 \
20654 asm volatile("\t" ASM_STAC "\n" \
20655- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20656+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20657 "2:\t" ASM_CLAC "\n" \
20658 "\t.section .fixup, \"ax\"\n" \
20659 "3:\tmov %3, %0\n" \
20660 "\tjmp 2b\n" \
20661 "\t.previous\n" \
20662 _ASM_EXTABLE(1b, 3b) \
20663- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20664+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20665 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20666 : "memory" \
20667 ); \
20668@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20669 default: \
20670 __cmpxchg_wrong_size(); \
20671 } \
20672+ pax_close_userland(); \
20673 *__uval = __old; \
20674 __ret; \
20675 })
20676@@ -636,17 +713,6 @@ extern struct movsl_mask {
20677
20678 #define ARCH_HAS_NOCACHE_UACCESS 1
20679
20680-#ifdef CONFIG_X86_32
20681-# include <asm/uaccess_32.h>
20682-#else
20683-# include <asm/uaccess_64.h>
20684-#endif
20685-
20686-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20687- unsigned n);
20688-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20689- unsigned n);
20690-
20691 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20692 # define copy_user_diag __compiletime_error
20693 #else
20694@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20695 extern void copy_user_diag("copy_from_user() buffer size is too small")
20696 copy_from_user_overflow(void);
20697 extern void copy_user_diag("copy_to_user() buffer size is too small")
20698-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20699+copy_to_user_overflow(void);
20700
20701 #undef copy_user_diag
20702
20703@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20704
20705 extern void
20706 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20707-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20708+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20709 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20710
20711 #else
20712@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20713
20714 #endif
20715
20716+#ifdef CONFIG_X86_32
20717+# include <asm/uaccess_32.h>
20718+#else
20719+# include <asm/uaccess_64.h>
20720+#endif
20721+
20722 static inline unsigned long __must_check
20723 copy_from_user(void *to, const void __user *from, unsigned long n)
20724 {
20725- int sz = __compiletime_object_size(to);
20726+ size_t sz = __compiletime_object_size(to);
20727
20728 might_fault();
20729
20730@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20731 * case, and do only runtime checking for non-constant sizes.
20732 */
20733
20734- if (likely(sz < 0 || sz >= n))
20735- n = _copy_from_user(to, from, n);
20736- else if(__builtin_constant_p(n))
20737- copy_from_user_overflow();
20738- else
20739- __copy_from_user_overflow(sz, n);
20740+ if (likely(sz != (size_t)-1 && sz < n)) {
20741+ if(__builtin_constant_p(n))
20742+ copy_from_user_overflow();
20743+ else
20744+ __copy_from_user_overflow(sz, n);
20745+ } else if (access_ok(VERIFY_READ, from, n))
20746+ n = __copy_from_user(to, from, n);
20747+ else if ((long)n > 0)
20748+ memset(to, 0, n);
20749
20750 return n;
20751 }
20752@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20753 static inline unsigned long __must_check
20754 copy_to_user(void __user *to, const void *from, unsigned long n)
20755 {
20756- int sz = __compiletime_object_size(from);
20757+ size_t sz = __compiletime_object_size(from);
20758
20759 might_fault();
20760
20761 /* See the comment in copy_from_user() above. */
20762- if (likely(sz < 0 || sz >= n))
20763- n = _copy_to_user(to, from, n);
20764- else if(__builtin_constant_p(n))
20765- copy_to_user_overflow();
20766- else
20767- __copy_to_user_overflow(sz, n);
20768+ if (likely(sz != (size_t)-1 && sz < n)) {
20769+ if(__builtin_constant_p(n))
20770+ copy_to_user_overflow();
20771+ else
20772+ __copy_to_user_overflow(sz, n);
20773+ } else if (access_ok(VERIFY_WRITE, to, n))
20774+ n = __copy_to_user(to, from, n);
20775
20776 return n;
20777 }
20778diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20779index 3c03a5d..1071638 100644
20780--- a/arch/x86/include/asm/uaccess_32.h
20781+++ b/arch/x86/include/asm/uaccess_32.h
20782@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20783 static __always_inline unsigned long __must_check
20784 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20785 {
20786+ if ((long)n < 0)
20787+ return n;
20788+
20789+ check_object_size(from, n, true);
20790+
20791 if (__builtin_constant_p(n)) {
20792 unsigned long ret;
20793
20794@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20795 __copy_to_user(void __user *to, const void *from, unsigned long n)
20796 {
20797 might_fault();
20798+
20799 return __copy_to_user_inatomic(to, from, n);
20800 }
20801
20802 static __always_inline unsigned long
20803 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20804 {
20805+ if ((long)n < 0)
20806+ return n;
20807+
20808 /* Avoid zeroing the tail if the copy fails..
20809 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20810 * but as the zeroing behaviour is only significant when n is not
20811@@ -137,6 +146,12 @@ static __always_inline unsigned long
20812 __copy_from_user(void *to, const void __user *from, unsigned long n)
20813 {
20814 might_fault();
20815+
20816+ if ((long)n < 0)
20817+ return n;
20818+
20819+ check_object_size(to, n, false);
20820+
20821 if (__builtin_constant_p(n)) {
20822 unsigned long ret;
20823
20824@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20825 const void __user *from, unsigned long n)
20826 {
20827 might_fault();
20828+
20829+ if ((long)n < 0)
20830+ return n;
20831+
20832 if (__builtin_constant_p(n)) {
20833 unsigned long ret;
20834
20835@@ -181,7 +200,10 @@ static __always_inline unsigned long
20836 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20837 unsigned long n)
20838 {
20839- return __copy_from_user_ll_nocache_nozero(to, from, n);
20840+ if ((long)n < 0)
20841+ return n;
20842+
20843+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20844 }
20845
20846 #endif /* _ASM_X86_UACCESS_32_H */
20847diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20848index 12a26b9..206c200 100644
20849--- a/arch/x86/include/asm/uaccess_64.h
20850+++ b/arch/x86/include/asm/uaccess_64.h
20851@@ -10,6 +10,9 @@
20852 #include <asm/alternative.h>
20853 #include <asm/cpufeature.h>
20854 #include <asm/page.h>
20855+#include <asm/pgtable.h>
20856+
20857+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20858
20859 /*
20860 * Copy To/From Userspace
20861@@ -17,14 +20,14 @@
20862
20863 /* Handles exceptions in both to and from, but doesn't do access_ok */
20864 __must_check unsigned long
20865-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20866+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20867 __must_check unsigned long
20868-copy_user_generic_string(void *to, const void *from, unsigned len);
20869+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20870 __must_check unsigned long
20871-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20872+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20873
20874 static __always_inline __must_check unsigned long
20875-copy_user_generic(void *to, const void *from, unsigned len)
20876+copy_user_generic(void *to, const void *from, unsigned long len)
20877 {
20878 unsigned ret;
20879
20880@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20881 }
20882
20883 __must_check unsigned long
20884-copy_in_user(void __user *to, const void __user *from, unsigned len);
20885+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20886
20887 static __always_inline __must_check
20888-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20889+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20890 {
20891- int ret = 0;
20892+ size_t sz = __compiletime_object_size(dst);
20893+ unsigned ret = 0;
20894+
20895+ if (size > INT_MAX)
20896+ return size;
20897+
20898+ check_object_size(dst, size, false);
20899+
20900+#ifdef CONFIG_PAX_MEMORY_UDEREF
20901+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20902+ return size;
20903+#endif
20904+
20905+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20906+ if(__builtin_constant_p(size))
20907+ copy_from_user_overflow();
20908+ else
20909+ __copy_from_user_overflow(sz, size);
20910+ return size;
20911+ }
20912
20913 if (!__builtin_constant_p(size))
20914- return copy_user_generic(dst, (__force void *)src, size);
20915+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20916 switch (size) {
20917- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20918+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20919 ret, "b", "b", "=q", 1);
20920 return ret;
20921- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20922+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20923 ret, "w", "w", "=r", 2);
20924 return ret;
20925- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20926+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20927 ret, "l", "k", "=r", 4);
20928 return ret;
20929- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20930+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20931 ret, "q", "", "=r", 8);
20932 return ret;
20933 case 10:
20934- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20935+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20936 ret, "q", "", "=r", 10);
20937 if (unlikely(ret))
20938 return ret;
20939 __get_user_asm(*(u16 *)(8 + (char *)dst),
20940- (u16 __user *)(8 + (char __user *)src),
20941+ (const u16 __user *)(8 + (const char __user *)src),
20942 ret, "w", "w", "=r", 2);
20943 return ret;
20944 case 16:
20945- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20946+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20947 ret, "q", "", "=r", 16);
20948 if (unlikely(ret))
20949 return ret;
20950 __get_user_asm(*(u64 *)(8 + (char *)dst),
20951- (u64 __user *)(8 + (char __user *)src),
20952+ (const u64 __user *)(8 + (const char __user *)src),
20953 ret, "q", "", "=r", 8);
20954 return ret;
20955 default:
20956- return copy_user_generic(dst, (__force void *)src, size);
20957+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20958 }
20959 }
20960
20961 static __always_inline __must_check
20962-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20963+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20964 {
20965 might_fault();
20966 return __copy_from_user_nocheck(dst, src, size);
20967 }
20968
20969 static __always_inline __must_check
20970-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20971+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20972 {
20973- int ret = 0;
20974+ size_t sz = __compiletime_object_size(src);
20975+ unsigned ret = 0;
20976+
20977+ if (size > INT_MAX)
20978+ return size;
20979+
20980+ check_object_size(src, size, true);
20981+
20982+#ifdef CONFIG_PAX_MEMORY_UDEREF
20983+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20984+ return size;
20985+#endif
20986+
20987+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20988+ if(__builtin_constant_p(size))
20989+ copy_to_user_overflow();
20990+ else
20991+ __copy_to_user_overflow(sz, size);
20992+ return size;
20993+ }
20994
20995 if (!__builtin_constant_p(size))
20996- return copy_user_generic((__force void *)dst, src, size);
20997+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20998 switch (size) {
20999- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
21000+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
21001 ret, "b", "b", "iq", 1);
21002 return ret;
21003- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
21004+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
21005 ret, "w", "w", "ir", 2);
21006 return ret;
21007- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
21008+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
21009 ret, "l", "k", "ir", 4);
21010 return ret;
21011- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
21012+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21013 ret, "q", "", "er", 8);
21014 return ret;
21015 case 10:
21016- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21017+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21018 ret, "q", "", "er", 10);
21019 if (unlikely(ret))
21020 return ret;
21021 asm("":::"memory");
21022- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
21023+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
21024 ret, "w", "w", "ir", 2);
21025 return ret;
21026 case 16:
21027- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21028+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21029 ret, "q", "", "er", 16);
21030 if (unlikely(ret))
21031 return ret;
21032 asm("":::"memory");
21033- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
21034+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
21035 ret, "q", "", "er", 8);
21036 return ret;
21037 default:
21038- return copy_user_generic((__force void *)dst, src, size);
21039+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
21040 }
21041 }
21042
21043 static __always_inline __must_check
21044-int __copy_to_user(void __user *dst, const void *src, unsigned size)
21045+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
21046 {
21047 might_fault();
21048 return __copy_to_user_nocheck(dst, src, size);
21049 }
21050
21051 static __always_inline __must_check
21052-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21053+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21054 {
21055- int ret = 0;
21056+ unsigned ret = 0;
21057
21058 might_fault();
21059+
21060+ if (size > INT_MAX)
21061+ return size;
21062+
21063+#ifdef CONFIG_PAX_MEMORY_UDEREF
21064+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21065+ return size;
21066+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
21067+ return size;
21068+#endif
21069+
21070 if (!__builtin_constant_p(size))
21071- return copy_user_generic((__force void *)dst,
21072- (__force void *)src, size);
21073+ return copy_user_generic((__force_kernel void *)____m(dst),
21074+ (__force_kernel const void *)____m(src), size);
21075 switch (size) {
21076 case 1: {
21077 u8 tmp;
21078- __get_user_asm(tmp, (u8 __user *)src,
21079+ __get_user_asm(tmp, (const u8 __user *)src,
21080 ret, "b", "b", "=q", 1);
21081 if (likely(!ret))
21082 __put_user_asm(tmp, (u8 __user *)dst,
21083@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21084 }
21085 case 2: {
21086 u16 tmp;
21087- __get_user_asm(tmp, (u16 __user *)src,
21088+ __get_user_asm(tmp, (const u16 __user *)src,
21089 ret, "w", "w", "=r", 2);
21090 if (likely(!ret))
21091 __put_user_asm(tmp, (u16 __user *)dst,
21092@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21093
21094 case 4: {
21095 u32 tmp;
21096- __get_user_asm(tmp, (u32 __user *)src,
21097+ __get_user_asm(tmp, (const u32 __user *)src,
21098 ret, "l", "k", "=r", 4);
21099 if (likely(!ret))
21100 __put_user_asm(tmp, (u32 __user *)dst,
21101@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21102 }
21103 case 8: {
21104 u64 tmp;
21105- __get_user_asm(tmp, (u64 __user *)src,
21106+ __get_user_asm(tmp, (const u64 __user *)src,
21107 ret, "q", "", "=r", 8);
21108 if (likely(!ret))
21109 __put_user_asm(tmp, (u64 __user *)dst,
21110@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21111 return ret;
21112 }
21113 default:
21114- return copy_user_generic((__force void *)dst,
21115- (__force void *)src, size);
21116+ return copy_user_generic((__force_kernel void *)____m(dst),
21117+ (__force_kernel const void *)____m(src), size);
21118 }
21119 }
21120
21121-static __must_check __always_inline int
21122-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
21123+static __must_check __always_inline unsigned long
21124+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
21125 {
21126 return __copy_from_user_nocheck(dst, src, size);
21127 }
21128
21129-static __must_check __always_inline int
21130-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
21131+static __must_check __always_inline unsigned long
21132+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
21133 {
21134 return __copy_to_user_nocheck(dst, src, size);
21135 }
21136
21137-extern long __copy_user_nocache(void *dst, const void __user *src,
21138- unsigned size, int zerorest);
21139+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
21140+ unsigned long size, int zerorest);
21141
21142-static inline int
21143-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
21144+static inline unsigned long
21145+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
21146 {
21147 might_fault();
21148+
21149+ if (size > INT_MAX)
21150+ return size;
21151+
21152+#ifdef CONFIG_PAX_MEMORY_UDEREF
21153+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21154+ return size;
21155+#endif
21156+
21157 return __copy_user_nocache(dst, src, size, 1);
21158 }
21159
21160-static inline int
21161+static inline unsigned long
21162 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
21163- unsigned size)
21164+ unsigned long size)
21165 {
21166+ if (size > INT_MAX)
21167+ return size;
21168+
21169+#ifdef CONFIG_PAX_MEMORY_UDEREF
21170+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21171+ return size;
21172+#endif
21173+
21174 return __copy_user_nocache(dst, src, size, 0);
21175 }
21176
21177 unsigned long
21178-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
21179+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
21180
21181 #endif /* _ASM_X86_UACCESS_64_H */
21182diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
21183index 5b238981..77fdd78 100644
21184--- a/arch/x86/include/asm/word-at-a-time.h
21185+++ b/arch/x86/include/asm/word-at-a-time.h
21186@@ -11,7 +11,7 @@
21187 * and shift, for example.
21188 */
21189 struct word_at_a_time {
21190- const unsigned long one_bits, high_bits;
21191+ unsigned long one_bits, high_bits;
21192 };
21193
21194 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
21195diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
21196index e45e4da..44e8572 100644
21197--- a/arch/x86/include/asm/x86_init.h
21198+++ b/arch/x86/include/asm/x86_init.h
21199@@ -129,7 +129,7 @@ struct x86_init_ops {
21200 struct x86_init_timers timers;
21201 struct x86_init_iommu iommu;
21202 struct x86_init_pci pci;
21203-};
21204+} __no_const;
21205
21206 /**
21207 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
21208@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
21209 void (*setup_percpu_clockev)(void);
21210 void (*early_percpu_clock_init)(void);
21211 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
21212-};
21213+} __no_const;
21214
21215 struct timespec;
21216
21217@@ -168,7 +168,7 @@ struct x86_platform_ops {
21218 void (*save_sched_clock_state)(void);
21219 void (*restore_sched_clock_state)(void);
21220 void (*apic_post_init)(void);
21221-};
21222+} __no_const;
21223
21224 struct pci_dev;
21225 struct msi_msg;
21226@@ -185,7 +185,7 @@ struct x86_msi_ops {
21227 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
21228 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
21229 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
21230-};
21231+} __no_const;
21232
21233 struct IO_APIC_route_entry;
21234 struct io_apic_irq_attr;
21235@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
21236 unsigned int destination, int vector,
21237 struct io_apic_irq_attr *attr);
21238 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
21239-};
21240+} __no_const;
21241
21242 extern struct x86_init_ops x86_init;
21243 extern struct x86_cpuinit_ops x86_cpuinit;
21244diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
21245index c949923..c22bfa4 100644
21246--- a/arch/x86/include/asm/xen/page.h
21247+++ b/arch/x86/include/asm/xen/page.h
21248@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
21249 extern struct page *m2p_find_override(unsigned long mfn);
21250 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
21251
21252-static inline unsigned long pfn_to_mfn(unsigned long pfn)
21253+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
21254 {
21255 unsigned long mfn;
21256
21257diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
21258index 7e7a79a..0824666 100644
21259--- a/arch/x86/include/asm/xsave.h
21260+++ b/arch/x86/include/asm/xsave.h
21261@@ -228,12 +228,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21262 if (unlikely(err))
21263 return -EFAULT;
21264
21265+ pax_open_userland();
21266 __asm__ __volatile__(ASM_STAC "\n"
21267- "1:"XSAVE"\n"
21268+ "1:"
21269+ __copyuser_seg
21270+ XSAVE"\n"
21271 "2: " ASM_CLAC "\n"
21272 xstate_fault
21273 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
21274 : "memory");
21275+ pax_close_userland();
21276 return err;
21277 }
21278
21279@@ -243,16 +247,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21280 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
21281 {
21282 int err = 0;
21283- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
21284+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
21285 u32 lmask = mask;
21286 u32 hmask = mask >> 32;
21287
21288+ pax_open_userland();
21289 __asm__ __volatile__(ASM_STAC "\n"
21290- "1:"XRSTOR"\n"
21291+ "1:"
21292+ __copyuser_seg
21293+ XRSTOR"\n"
21294 "2: " ASM_CLAC "\n"
21295 xstate_fault
21296 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
21297 : "memory"); /* memory required? */
21298+ pax_close_userland();
21299 return err;
21300 }
21301
21302diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
21303index bbae024..e1528f9 100644
21304--- a/arch/x86/include/uapi/asm/e820.h
21305+++ b/arch/x86/include/uapi/asm/e820.h
21306@@ -63,7 +63,7 @@ struct e820map {
21307 #define ISA_START_ADDRESS 0xa0000
21308 #define ISA_END_ADDRESS 0x100000
21309
21310-#define BIOS_BEGIN 0x000a0000
21311+#define BIOS_BEGIN 0x000c0000
21312 #define BIOS_END 0x00100000
21313
21314 #define BIOS_ROM_BASE 0xffe00000
21315diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
21316index 7b0a55a..ad115bf 100644
21317--- a/arch/x86/include/uapi/asm/ptrace-abi.h
21318+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
21319@@ -49,7 +49,6 @@
21320 #define EFLAGS 144
21321 #define RSP 152
21322 #define SS 160
21323-#define ARGOFFSET R11
21324 #endif /* __ASSEMBLY__ */
21325
21326 /* top of stack page */
21327diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
21328index 0e79420..990a2fe 100644
21329--- a/arch/x86/include/uapi/asm/vmx.h
21330+++ b/arch/x86/include/uapi/asm/vmx.h
21331@@ -67,6 +67,7 @@
21332 #define EXIT_REASON_EPT_MISCONFIG 49
21333 #define EXIT_REASON_INVEPT 50
21334 #define EXIT_REASON_PREEMPTION_TIMER 52
21335+#define EXIT_REASON_INVVPID 53
21336 #define EXIT_REASON_WBINVD 54
21337 #define EXIT_REASON_XSETBV 55
21338 #define EXIT_REASON_APIC_WRITE 56
21339@@ -114,6 +115,7 @@
21340 { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
21341 { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
21342 { EXIT_REASON_INVD, "INVD" }, \
21343+ { EXIT_REASON_INVVPID, "INVVPID" }, \
21344 { EXIT_REASON_INVPCID, "INVPCID" }
21345
21346 #endif /* _UAPIVMX_H */
21347diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
21348index ada2e2d..ca69e16 100644
21349--- a/arch/x86/kernel/Makefile
21350+++ b/arch/x86/kernel/Makefile
21351@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
21352 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
21353 obj-$(CONFIG_IRQ_WORK) += irq_work.o
21354 obj-y += probe_roms.o
21355-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
21356+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
21357 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
21358 obj-$(CONFIG_X86_64) += mcount_64.o
21359 obj-y += syscall_$(BITS).o vsyscall_gtod.o
21360diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
21361index b436fc7..1ba7044 100644
21362--- a/arch/x86/kernel/acpi/boot.c
21363+++ b/arch/x86/kernel/acpi/boot.c
21364@@ -1272,7 +1272,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
21365 * If your system is blacklisted here, but you find that acpi=force
21366 * works for you, please contact linux-acpi@vger.kernel.org
21367 */
21368-static struct dmi_system_id __initdata acpi_dmi_table[] = {
21369+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
21370 /*
21371 * Boxes that need ACPI disabled
21372 */
21373@@ -1347,7 +1347,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
21374 };
21375
21376 /* second table for DMI checks that should run after early-quirks */
21377-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
21378+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
21379 /*
21380 * HP laptops which use a DSDT reporting as HP/SB400/10000,
21381 * which includes some code which overrides all temperature
21382diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
21383index 3136820..e2c6577 100644
21384--- a/arch/x86/kernel/acpi/sleep.c
21385+++ b/arch/x86/kernel/acpi/sleep.c
21386@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
21387 #else /* CONFIG_64BIT */
21388 #ifdef CONFIG_SMP
21389 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
21390+
21391+ pax_open_kernel();
21392 early_gdt_descr.address =
21393 (unsigned long)get_cpu_gdt_table(smp_processor_id());
21394+ pax_close_kernel();
21395+
21396 initial_gs = per_cpu_offset(smp_processor_id());
21397 #endif
21398 initial_code = (unsigned long)wakeup_long64;
21399diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
21400index 665c6b7..eae4d56 100644
21401--- a/arch/x86/kernel/acpi/wakeup_32.S
21402+++ b/arch/x86/kernel/acpi/wakeup_32.S
21403@@ -29,13 +29,11 @@ wakeup_pmode_return:
21404 # and restore the stack ... but you need gdt for this to work
21405 movl saved_context_esp, %esp
21406
21407- movl %cs:saved_magic, %eax
21408- cmpl $0x12345678, %eax
21409+ cmpl $0x12345678, saved_magic
21410 jne bogus_magic
21411
21412 # jump to place where we left off
21413- movl saved_eip, %eax
21414- jmp *%eax
21415+ jmp *(saved_eip)
21416
21417 bogus_magic:
21418 jmp bogus_magic
21419diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
21420index 703130f..27a155d 100644
21421--- a/arch/x86/kernel/alternative.c
21422+++ b/arch/x86/kernel/alternative.c
21423@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21424 */
21425 for (a = start; a < end; a++) {
21426 instr = (u8 *)&a->instr_offset + a->instr_offset;
21427+
21428+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21429+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21430+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21431+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21432+#endif
21433+
21434 replacement = (u8 *)&a->repl_offset + a->repl_offset;
21435 BUG_ON(a->replacementlen > a->instrlen);
21436 BUG_ON(a->instrlen > sizeof(insnbuf));
21437@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21438 add_nops(insnbuf + a->replacementlen,
21439 a->instrlen - a->replacementlen);
21440
21441+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21442+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21443+ instr = ktva_ktla(instr);
21444+#endif
21445+
21446 text_poke_early(instr, insnbuf, a->instrlen);
21447 }
21448 }
21449@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
21450 for (poff = start; poff < end; poff++) {
21451 u8 *ptr = (u8 *)poff + *poff;
21452
21453+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21454+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21455+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21456+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21457+#endif
21458+
21459 if (!*poff || ptr < text || ptr >= text_end)
21460 continue;
21461 /* turn DS segment override prefix into lock prefix */
21462- if (*ptr == 0x3e)
21463+ if (*ktla_ktva(ptr) == 0x3e)
21464 text_poke(ptr, ((unsigned char []){0xf0}), 1);
21465 }
21466 mutex_unlock(&text_mutex);
21467@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
21468 for (poff = start; poff < end; poff++) {
21469 u8 *ptr = (u8 *)poff + *poff;
21470
21471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21472+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21473+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21474+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21475+#endif
21476+
21477 if (!*poff || ptr < text || ptr >= text_end)
21478 continue;
21479 /* turn lock prefix into DS segment override prefix */
21480- if (*ptr == 0xf0)
21481+ if (*ktla_ktva(ptr) == 0xf0)
21482 text_poke(ptr, ((unsigned char []){0x3E}), 1);
21483 }
21484 mutex_unlock(&text_mutex);
21485@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
21486
21487 BUG_ON(p->len > MAX_PATCH_LEN);
21488 /* prep the buffer with the original instructions */
21489- memcpy(insnbuf, p->instr, p->len);
21490+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
21491 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
21492 (unsigned long)p->instr, p->len);
21493
21494@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
21495 if (!uniproc_patched || num_possible_cpus() == 1)
21496 free_init_pages("SMP alternatives",
21497 (unsigned long)__smp_locks,
21498- (unsigned long)__smp_locks_end);
21499+ PAGE_ALIGN((unsigned long)__smp_locks_end));
21500 #endif
21501
21502 apply_paravirt(__parainstructions, __parainstructions_end);
21503@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
21504 * instructions. And on the local CPU you need to be protected again NMI or MCE
21505 * handlers seeing an inconsistent instruction while you patch.
21506 */
21507-void *__init_or_module text_poke_early(void *addr, const void *opcode,
21508+void *__kprobes text_poke_early(void *addr, const void *opcode,
21509 size_t len)
21510 {
21511 unsigned long flags;
21512 local_irq_save(flags);
21513- memcpy(addr, opcode, len);
21514+
21515+ pax_open_kernel();
21516+ memcpy(ktla_ktva(addr), opcode, len);
21517 sync_core();
21518+ pax_close_kernel();
21519+
21520 local_irq_restore(flags);
21521 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21522 that causes hangs on some VIA CPUs. */
21523@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21524 */
21525 void *text_poke(void *addr, const void *opcode, size_t len)
21526 {
21527- unsigned long flags;
21528- char *vaddr;
21529+ unsigned char *vaddr = ktla_ktva(addr);
21530 struct page *pages[2];
21531- int i;
21532+ size_t i;
21533
21534 if (!core_kernel_text((unsigned long)addr)) {
21535- pages[0] = vmalloc_to_page(addr);
21536- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21537+ pages[0] = vmalloc_to_page(vaddr);
21538+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21539 } else {
21540- pages[0] = virt_to_page(addr);
21541+ pages[0] = virt_to_page(vaddr);
21542 WARN_ON(!PageReserved(pages[0]));
21543- pages[1] = virt_to_page(addr + PAGE_SIZE);
21544+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21545 }
21546 BUG_ON(!pages[0]);
21547- local_irq_save(flags);
21548- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21549- if (pages[1])
21550- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21551- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21552- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21553- clear_fixmap(FIX_TEXT_POKE0);
21554- if (pages[1])
21555- clear_fixmap(FIX_TEXT_POKE1);
21556- local_flush_tlb();
21557- sync_core();
21558- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21559- that causes hangs on some VIA CPUs. */
21560+ text_poke_early(addr, opcode, len);
21561 for (i = 0; i < len; i++)
21562- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21563- local_irq_restore(flags);
21564+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21565 return addr;
21566 }
21567
21568@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21569 if (likely(!bp_patching_in_progress))
21570 return 0;
21571
21572- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21573+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21574 return 0;
21575
21576 /* set up the specified breakpoint handler */
21577@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21578 */
21579 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21580 {
21581- unsigned char int3 = 0xcc;
21582+ const unsigned char int3 = 0xcc;
21583
21584 bp_int3_handler = handler;
21585 bp_int3_addr = (u8 *)addr + sizeof(int3);
21586diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21587index 6776027..972266c 100644
21588--- a/arch/x86/kernel/apic/apic.c
21589+++ b/arch/x86/kernel/apic/apic.c
21590@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21591 /*
21592 * Debug level, exported for io_apic.c
21593 */
21594-unsigned int apic_verbosity;
21595+int apic_verbosity;
21596
21597 int pic_mode;
21598
21599@@ -1989,7 +1989,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21600 apic_write(APIC_ESR, 0);
21601 v = apic_read(APIC_ESR);
21602 ack_APIC_irq();
21603- atomic_inc(&irq_err_count);
21604+ atomic_inc_unchecked(&irq_err_count);
21605
21606 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21607 smp_processor_id(), v);
21608diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21609index de918c4..32eed23 100644
21610--- a/arch/x86/kernel/apic/apic_flat_64.c
21611+++ b/arch/x86/kernel/apic/apic_flat_64.c
21612@@ -154,7 +154,7 @@ static int flat_probe(void)
21613 return 1;
21614 }
21615
21616-static struct apic apic_flat = {
21617+static struct apic apic_flat __read_only = {
21618 .name = "flat",
21619 .probe = flat_probe,
21620 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21621@@ -260,7 +260,7 @@ static int physflat_probe(void)
21622 return 0;
21623 }
21624
21625-static struct apic apic_physflat = {
21626+static struct apic apic_physflat __read_only = {
21627
21628 .name = "physical flat",
21629 .probe = physflat_probe,
21630diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21631index b205cdb..d8503ff 100644
21632--- a/arch/x86/kernel/apic/apic_noop.c
21633+++ b/arch/x86/kernel/apic/apic_noop.c
21634@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
21635 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21636 }
21637
21638-struct apic apic_noop = {
21639+struct apic apic_noop __read_only = {
21640 .name = "noop",
21641 .probe = noop_probe,
21642 .acpi_madt_oem_check = NULL,
21643diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21644index c4a8d63..fe893ac 100644
21645--- a/arch/x86/kernel/apic/bigsmp_32.c
21646+++ b/arch/x86/kernel/apic/bigsmp_32.c
21647@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
21648 return dmi_bigsmp;
21649 }
21650
21651-static struct apic apic_bigsmp = {
21652+static struct apic apic_bigsmp __read_only = {
21653
21654 .name = "bigsmp",
21655 .probe = probe_bigsmp,
21656diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21657index 337ce5a..c8d98b4 100644
21658--- a/arch/x86/kernel/apic/io_apic.c
21659+++ b/arch/x86/kernel/apic/io_apic.c
21660@@ -1230,7 +1230,7 @@ out:
21661 }
21662 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21663
21664-void lock_vector_lock(void)
21665+void lock_vector_lock(void) __acquires(vector_lock)
21666 {
21667 /* Used to the online set of cpus does not change
21668 * during assign_irq_vector.
21669@@ -1238,7 +1238,7 @@ void lock_vector_lock(void)
21670 raw_spin_lock(&vector_lock);
21671 }
21672
21673-void unlock_vector_lock(void)
21674+void unlock_vector_lock(void) __releases(vector_lock)
21675 {
21676 raw_spin_unlock(&vector_lock);
21677 }
21678@@ -2465,7 +2465,7 @@ static void ack_apic_edge(struct irq_data *data)
21679 ack_APIC_irq();
21680 }
21681
21682-atomic_t irq_mis_count;
21683+atomic_unchecked_t irq_mis_count;
21684
21685 #ifdef CONFIG_GENERIC_PENDING_IRQ
21686 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21687@@ -2606,7 +2606,7 @@ static void ack_apic_level(struct irq_data *data)
21688 * at the cpu.
21689 */
21690 if (!(v & (1 << (i & 0x1f)))) {
21691- atomic_inc(&irq_mis_count);
21692+ atomic_inc_unchecked(&irq_mis_count);
21693
21694 eoi_ioapic_irq(irq, cfg);
21695 }
21696diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21697index bda4886..f9c7195 100644
21698--- a/arch/x86/kernel/apic/probe_32.c
21699+++ b/arch/x86/kernel/apic/probe_32.c
21700@@ -72,7 +72,7 @@ static int probe_default(void)
21701 return 1;
21702 }
21703
21704-static struct apic apic_default = {
21705+static struct apic apic_default __read_only = {
21706
21707 .name = "default",
21708 .probe = probe_default,
21709diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21710index 6ce600f..cb44af8 100644
21711--- a/arch/x86/kernel/apic/x2apic_cluster.c
21712+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21713@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21714 return notifier_from_errno(err);
21715 }
21716
21717-static struct notifier_block __refdata x2apic_cpu_notifier = {
21718+static struct notifier_block x2apic_cpu_notifier = {
21719 .notifier_call = update_clusterinfo,
21720 };
21721
21722@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21723 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21724 }
21725
21726-static struct apic apic_x2apic_cluster = {
21727+static struct apic apic_x2apic_cluster __read_only = {
21728
21729 .name = "cluster x2apic",
21730 .probe = x2apic_cluster_probe,
21731diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21732index 6fae733..5ca17af 100644
21733--- a/arch/x86/kernel/apic/x2apic_phys.c
21734+++ b/arch/x86/kernel/apic/x2apic_phys.c
21735@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21736 return apic == &apic_x2apic_phys;
21737 }
21738
21739-static struct apic apic_x2apic_phys = {
21740+static struct apic apic_x2apic_phys __read_only = {
21741
21742 .name = "physical x2apic",
21743 .probe = x2apic_phys_probe,
21744diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21745index 004f017..8fbc8b5 100644
21746--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21747+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21748@@ -350,7 +350,7 @@ static int uv_probe(void)
21749 return apic == &apic_x2apic_uv_x;
21750 }
21751
21752-static struct apic __refdata apic_x2apic_uv_x = {
21753+static struct apic apic_x2apic_uv_x __read_only = {
21754
21755 .name = "UV large system",
21756 .probe = uv_probe,
21757diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21758index 5848744..56cb598 100644
21759--- a/arch/x86/kernel/apm_32.c
21760+++ b/arch/x86/kernel/apm_32.c
21761@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21762 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21763 * even though they are called in protected mode.
21764 */
21765-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21766+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21767 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21768
21769 static const char driver_version[] = "1.16ac"; /* no spaces */
21770@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21771 BUG_ON(cpu != 0);
21772 gdt = get_cpu_gdt_table(cpu);
21773 save_desc_40 = gdt[0x40 / 8];
21774+
21775+ pax_open_kernel();
21776 gdt[0x40 / 8] = bad_bios_desc;
21777+ pax_close_kernel();
21778
21779 apm_irq_save(flags);
21780 APM_DO_SAVE_SEGS;
21781@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21782 &call->esi);
21783 APM_DO_RESTORE_SEGS;
21784 apm_irq_restore(flags);
21785+
21786+ pax_open_kernel();
21787 gdt[0x40 / 8] = save_desc_40;
21788+ pax_close_kernel();
21789+
21790 put_cpu();
21791
21792 return call->eax & 0xff;
21793@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21794 BUG_ON(cpu != 0);
21795 gdt = get_cpu_gdt_table(cpu);
21796 save_desc_40 = gdt[0x40 / 8];
21797+
21798+ pax_open_kernel();
21799 gdt[0x40 / 8] = bad_bios_desc;
21800+ pax_close_kernel();
21801
21802 apm_irq_save(flags);
21803 APM_DO_SAVE_SEGS;
21804@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21805 &call->eax);
21806 APM_DO_RESTORE_SEGS;
21807 apm_irq_restore(flags);
21808+
21809+ pax_open_kernel();
21810 gdt[0x40 / 8] = save_desc_40;
21811+ pax_close_kernel();
21812+
21813 put_cpu();
21814 return error;
21815 }
21816@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21817 * code to that CPU.
21818 */
21819 gdt = get_cpu_gdt_table(0);
21820+
21821+ pax_open_kernel();
21822 set_desc_base(&gdt[APM_CS >> 3],
21823 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21824 set_desc_base(&gdt[APM_CS_16 >> 3],
21825 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21826 set_desc_base(&gdt[APM_DS >> 3],
21827 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21828+ pax_close_kernel();
21829
21830 proc_create("apm", 0, NULL, &apm_file_ops);
21831
21832diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21833index 9f6b934..cf5ffb3 100644
21834--- a/arch/x86/kernel/asm-offsets.c
21835+++ b/arch/x86/kernel/asm-offsets.c
21836@@ -32,6 +32,8 @@ void common(void) {
21837 OFFSET(TI_flags, thread_info, flags);
21838 OFFSET(TI_status, thread_info, status);
21839 OFFSET(TI_addr_limit, thread_info, addr_limit);
21840+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21841+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21842
21843 BLANK();
21844 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21845@@ -52,8 +54,26 @@ void common(void) {
21846 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21847 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21848 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21849+
21850+#ifdef CONFIG_PAX_KERNEXEC
21851+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21852 #endif
21853
21854+#ifdef CONFIG_PAX_MEMORY_UDEREF
21855+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21856+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21857+#ifdef CONFIG_X86_64
21858+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21859+#endif
21860+#endif
21861+
21862+#endif
21863+
21864+ BLANK();
21865+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21866+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21867+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21868+
21869 #ifdef CONFIG_XEN
21870 BLANK();
21871 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21872diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21873index e7c798b..2b2019b 100644
21874--- a/arch/x86/kernel/asm-offsets_64.c
21875+++ b/arch/x86/kernel/asm-offsets_64.c
21876@@ -77,6 +77,7 @@ int main(void)
21877 BLANK();
21878 #undef ENTRY
21879
21880+ DEFINE(TSS_size, sizeof(struct tss_struct));
21881 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21882 BLANK();
21883
21884diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21885index 7fd54f0..0691410 100644
21886--- a/arch/x86/kernel/cpu/Makefile
21887+++ b/arch/x86/kernel/cpu/Makefile
21888@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21889 CFLAGS_REMOVE_perf_event.o = -pg
21890 endif
21891
21892-# Make sure load_percpu_segment has no stackprotector
21893-nostackp := $(call cc-option, -fno-stack-protector)
21894-CFLAGS_common.o := $(nostackp)
21895-
21896 obj-y := intel_cacheinfo.o scattered.o topology.o
21897 obj-y += proc.o capflags.o powerflags.o common.o
21898 obj-y += rdrand.o
21899diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21900index 60e5497..8efbd2f 100644
21901--- a/arch/x86/kernel/cpu/amd.c
21902+++ b/arch/x86/kernel/cpu/amd.c
21903@@ -711,7 +711,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21904 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21905 {
21906 /* AMD errata T13 (order #21922) */
21907- if ((c->x86 == 6)) {
21908+ if (c->x86 == 6) {
21909 /* Duron Rev A0 */
21910 if (c->x86_model == 3 && c->x86_mask == 0)
21911 size = 64;
21912diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21913index e4ab2b4..d487ba5 100644
21914--- a/arch/x86/kernel/cpu/common.c
21915+++ b/arch/x86/kernel/cpu/common.c
21916@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21917
21918 static const struct cpu_dev *this_cpu = &default_cpu;
21919
21920-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21921-#ifdef CONFIG_X86_64
21922- /*
21923- * We need valid kernel segments for data and code in long mode too
21924- * IRET will check the segment types kkeil 2000/10/28
21925- * Also sysret mandates a special GDT layout
21926- *
21927- * TLS descriptors are currently at a different place compared to i386.
21928- * Hopefully nobody expects them at a fixed place (Wine?)
21929- */
21930- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21931- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21932- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21933- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21934- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21935- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21936-#else
21937- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21938- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21939- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21940- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21941- /*
21942- * Segments used for calling PnP BIOS have byte granularity.
21943- * They code segments and data segments have fixed 64k limits,
21944- * the transfer segment sizes are set at run time.
21945- */
21946- /* 32-bit code */
21947- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21948- /* 16-bit code */
21949- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21950- /* 16-bit data */
21951- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21952- /* 16-bit data */
21953- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21954- /* 16-bit data */
21955- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21956- /*
21957- * The APM segments have byte granularity and their bases
21958- * are set at run time. All have 64k limits.
21959- */
21960- /* 32-bit code */
21961- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21962- /* 16-bit code */
21963- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21964- /* data */
21965- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21966-
21967- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21968- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21969- GDT_STACK_CANARY_INIT
21970-#endif
21971-} };
21972-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21973-
21974 static int __init x86_xsave_setup(char *s)
21975 {
21976 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21977@@ -303,6 +249,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21978 }
21979 }
21980
21981+#ifdef CONFIG_X86_64
21982+static __init int setup_disable_pcid(char *arg)
21983+{
21984+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21985+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21986+
21987+#ifdef CONFIG_PAX_MEMORY_UDEREF
21988+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21989+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21990+#endif
21991+
21992+ return 1;
21993+}
21994+__setup("nopcid", setup_disable_pcid);
21995+
21996+static void setup_pcid(struct cpuinfo_x86 *c)
21997+{
21998+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21999+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
22000+
22001+#ifdef CONFIG_PAX_MEMORY_UDEREF
22002+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
22003+ pax_open_kernel();
22004+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
22005+ pax_close_kernel();
22006+ printk("PAX: slow and weak UDEREF enabled\n");
22007+ } else
22008+ printk("PAX: UDEREF disabled\n");
22009+#endif
22010+
22011+ return;
22012+ }
22013+
22014+ printk("PAX: PCID detected\n");
22015+ set_in_cr4(X86_CR4_PCIDE);
22016+
22017+#ifdef CONFIG_PAX_MEMORY_UDEREF
22018+ pax_open_kernel();
22019+ clone_pgd_mask = ~(pgdval_t)0UL;
22020+ pax_close_kernel();
22021+ if (pax_user_shadow_base)
22022+ printk("PAX: weak UDEREF enabled\n");
22023+ else {
22024+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
22025+ printk("PAX: strong UDEREF enabled\n");
22026+ }
22027+#endif
22028+
22029+ if (cpu_has(c, X86_FEATURE_INVPCID))
22030+ printk("PAX: INVPCID detected\n");
22031+}
22032+#endif
22033+
22034 /*
22035 * Some CPU features depend on higher CPUID levels, which may not always
22036 * be available due to CPUID level capping or broken virtualization
22037@@ -403,7 +402,7 @@ void switch_to_new_gdt(int cpu)
22038 {
22039 struct desc_ptr gdt_descr;
22040
22041- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
22042+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22043 gdt_descr.size = GDT_SIZE - 1;
22044 load_gdt(&gdt_descr);
22045 /* Reload the per-cpu base */
22046@@ -893,6 +892,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22047 setup_smep(c);
22048 setup_smap(c);
22049
22050+#ifdef CONFIG_X86_64
22051+ setup_pcid(c);
22052+#endif
22053+
22054 /*
22055 * The vendor-specific functions might have changed features.
22056 * Now we do "generic changes."
22057@@ -901,6 +904,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22058 /* Filter out anything that depends on CPUID levels we don't have */
22059 filter_cpuid_features(c, true);
22060
22061+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22062+ setup_clear_cpu_cap(X86_FEATURE_SEP);
22063+#endif
22064+
22065 /* If the model name is still unset, do table lookup. */
22066 if (!c->x86_model_id[0]) {
22067 const char *p;
22068@@ -981,7 +988,7 @@ static void syscall32_cpu_init(void)
22069 void enable_sep_cpu(void)
22070 {
22071 int cpu = get_cpu();
22072- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22073+ struct tss_struct *tss = init_tss + cpu;
22074
22075 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22076 put_cpu();
22077@@ -1121,14 +1128,16 @@ static __init int setup_disablecpuid(char *arg)
22078 }
22079 __setup("clearcpuid=", setup_disablecpuid);
22080
22081+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
22082+EXPORT_PER_CPU_SYMBOL(current_tinfo);
22083+
22084 DEFINE_PER_CPU(unsigned long, kernel_stack) =
22085- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
22086+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
22087 EXPORT_PER_CPU_SYMBOL(kernel_stack);
22088
22089 #ifdef CONFIG_X86_64
22090-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22091-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
22092- (unsigned long) debug_idt_table };
22093+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22094+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
22095
22096 DEFINE_PER_CPU_FIRST(union irq_stack_union,
22097 irq_stack_union) __aligned(PAGE_SIZE) __visible;
22098@@ -1291,7 +1300,7 @@ void cpu_init(void)
22099 load_ucode_ap();
22100
22101 cpu = stack_smp_processor_id();
22102- t = &per_cpu(init_tss, cpu);
22103+ t = init_tss + cpu;
22104 oist = &per_cpu(orig_ist, cpu);
22105
22106 #ifdef CONFIG_NUMA
22107@@ -1326,7 +1335,6 @@ void cpu_init(void)
22108 wrmsrl(MSR_KERNEL_GS_BASE, 0);
22109 barrier();
22110
22111- x86_configure_nx();
22112 enable_x2apic();
22113
22114 /*
22115@@ -1378,7 +1386,7 @@ void cpu_init(void)
22116 {
22117 int cpu = smp_processor_id();
22118 struct task_struct *curr = current;
22119- struct tss_struct *t = &per_cpu(init_tss, cpu);
22120+ struct tss_struct *t = init_tss + cpu;
22121 struct thread_struct *thread = &curr->thread;
22122
22123 show_ucode_info_early();
22124diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
22125index c703507..28535e3 100644
22126--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
22127+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
22128@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
22129 };
22130
22131 #ifdef CONFIG_AMD_NB
22132+static struct attribute *default_attrs_amd_nb[] = {
22133+ &type.attr,
22134+ &level.attr,
22135+ &coherency_line_size.attr,
22136+ &physical_line_partition.attr,
22137+ &ways_of_associativity.attr,
22138+ &number_of_sets.attr,
22139+ &size.attr,
22140+ &shared_cpu_map.attr,
22141+ &shared_cpu_list.attr,
22142+ NULL,
22143+ NULL,
22144+ NULL,
22145+ NULL
22146+};
22147+
22148 static struct attribute **amd_l3_attrs(void)
22149 {
22150 static struct attribute **attrs;
22151@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
22152
22153 n = ARRAY_SIZE(default_attrs);
22154
22155- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
22156- n += 2;
22157-
22158- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
22159- n += 1;
22160-
22161- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
22162- if (attrs == NULL)
22163- return attrs = default_attrs;
22164-
22165- for (n = 0; default_attrs[n]; n++)
22166- attrs[n] = default_attrs[n];
22167+ attrs = default_attrs_amd_nb;
22168
22169 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
22170 attrs[n++] = &cache_disable_0.attr;
22171@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
22172 .default_attrs = default_attrs,
22173 };
22174
22175+#ifdef CONFIG_AMD_NB
22176+static struct kobj_type ktype_cache_amd_nb = {
22177+ .sysfs_ops = &sysfs_ops,
22178+ .default_attrs = default_attrs_amd_nb,
22179+};
22180+#endif
22181+
22182 static struct kobj_type ktype_percpu_entry = {
22183 .sysfs_ops = &sysfs_ops,
22184 };
22185@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
22186 return retval;
22187 }
22188
22189+#ifdef CONFIG_AMD_NB
22190+ amd_l3_attrs();
22191+#endif
22192+
22193 for (i = 0; i < num_cache_leaves; i++) {
22194+ struct kobj_type *ktype;
22195+
22196 this_object = INDEX_KOBJECT_PTR(cpu, i);
22197 this_object->cpu = cpu;
22198 this_object->index = i;
22199
22200 this_leaf = CPUID4_INFO_IDX(cpu, i);
22201
22202- ktype_cache.default_attrs = default_attrs;
22203+ ktype = &ktype_cache;
22204 #ifdef CONFIG_AMD_NB
22205 if (this_leaf->base.nb)
22206- ktype_cache.default_attrs = amd_l3_attrs();
22207+ ktype = &ktype_cache_amd_nb;
22208 #endif
22209 retval = kobject_init_and_add(&(this_object->kobj),
22210- &ktype_cache,
22211+ ktype,
22212 per_cpu(ici_cache_kobject, cpu),
22213 "index%1lu", i);
22214 if (unlikely(retval)) {
22215diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
22216index bd9ccda..38314e7 100644
22217--- a/arch/x86/kernel/cpu/mcheck/mce.c
22218+++ b/arch/x86/kernel/cpu/mcheck/mce.c
22219@@ -45,6 +45,7 @@
22220 #include <asm/processor.h>
22221 #include <asm/mce.h>
22222 #include <asm/msr.h>
22223+#include <asm/local.h>
22224
22225 #include "mce-internal.h"
22226
22227@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
22228 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
22229 m->cs, m->ip);
22230
22231- if (m->cs == __KERNEL_CS)
22232+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
22233 print_symbol("{%s}", m->ip);
22234 pr_cont("\n");
22235 }
22236@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
22237
22238 #define PANIC_TIMEOUT 5 /* 5 seconds */
22239
22240-static atomic_t mce_paniced;
22241+static atomic_unchecked_t mce_paniced;
22242
22243 static int fake_panic;
22244-static atomic_t mce_fake_paniced;
22245+static atomic_unchecked_t mce_fake_paniced;
22246
22247 /* Panic in progress. Enable interrupts and wait for final IPI */
22248 static void wait_for_panic(void)
22249@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22250 /*
22251 * Make sure only one CPU runs in machine check panic
22252 */
22253- if (atomic_inc_return(&mce_paniced) > 1)
22254+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
22255 wait_for_panic();
22256 barrier();
22257
22258@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22259 console_verbose();
22260 } else {
22261 /* Don't log too much for fake panic */
22262- if (atomic_inc_return(&mce_fake_paniced) > 1)
22263+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
22264 return;
22265 }
22266 /* First print corrected ones that are still unlogged */
22267@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22268 if (!fake_panic) {
22269 if (panic_timeout == 0)
22270 panic_timeout = mca_cfg.panic_timeout;
22271- panic(msg);
22272+ panic("%s", msg);
22273 } else
22274 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
22275 }
22276@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
22277 * might have been modified by someone else.
22278 */
22279 rmb();
22280- if (atomic_read(&mce_paniced))
22281+ if (atomic_read_unchecked(&mce_paniced))
22282 wait_for_panic();
22283 if (!mca_cfg.monarch_timeout)
22284 goto out;
22285@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
22286 }
22287
22288 /* Call the installed machine check handler for this CPU setup. */
22289-void (*machine_check_vector)(struct pt_regs *, long error_code) =
22290+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
22291 unexpected_machine_check;
22292
22293 /*
22294@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22295 return;
22296 }
22297
22298+ pax_open_kernel();
22299 machine_check_vector = do_machine_check;
22300+ pax_close_kernel();
22301
22302 __mcheck_cpu_init_generic();
22303 __mcheck_cpu_init_vendor(c);
22304@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22305 */
22306
22307 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
22308-static int mce_chrdev_open_count; /* #times opened */
22309+static local_t mce_chrdev_open_count; /* #times opened */
22310 static int mce_chrdev_open_exclu; /* already open exclusive? */
22311
22312 static int mce_chrdev_open(struct inode *inode, struct file *file)
22313@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22314 spin_lock(&mce_chrdev_state_lock);
22315
22316 if (mce_chrdev_open_exclu ||
22317- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
22318+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
22319 spin_unlock(&mce_chrdev_state_lock);
22320
22321 return -EBUSY;
22322@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22323
22324 if (file->f_flags & O_EXCL)
22325 mce_chrdev_open_exclu = 1;
22326- mce_chrdev_open_count++;
22327+ local_inc(&mce_chrdev_open_count);
22328
22329 spin_unlock(&mce_chrdev_state_lock);
22330
22331@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
22332 {
22333 spin_lock(&mce_chrdev_state_lock);
22334
22335- mce_chrdev_open_count--;
22336+ local_dec(&mce_chrdev_open_count);
22337 mce_chrdev_open_exclu = 0;
22338
22339 spin_unlock(&mce_chrdev_state_lock);
22340@@ -2413,7 +2416,7 @@ static __init void mce_init_banks(void)
22341
22342 for (i = 0; i < mca_cfg.banks; i++) {
22343 struct mce_bank *b = &mce_banks[i];
22344- struct device_attribute *a = &b->attr;
22345+ device_attribute_no_const *a = &b->attr;
22346
22347 sysfs_attr_init(&a->attr);
22348 a->attr.name = b->attrname;
22349@@ -2520,7 +2523,7 @@ struct dentry *mce_get_debugfs_dir(void)
22350 static void mce_reset(void)
22351 {
22352 cpu_missing = 0;
22353- atomic_set(&mce_fake_paniced, 0);
22354+ atomic_set_unchecked(&mce_fake_paniced, 0);
22355 atomic_set(&mce_executing, 0);
22356 atomic_set(&mce_callin, 0);
22357 atomic_set(&global_nwo, 0);
22358diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
22359index a304298..49b6d06 100644
22360--- a/arch/x86/kernel/cpu/mcheck/p5.c
22361+++ b/arch/x86/kernel/cpu/mcheck/p5.c
22362@@ -10,6 +10,7 @@
22363 #include <asm/processor.h>
22364 #include <asm/mce.h>
22365 #include <asm/msr.h>
22366+#include <asm/pgtable.h>
22367
22368 /* By default disabled */
22369 int mce_p5_enabled __read_mostly;
22370@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
22371 if (!cpu_has(c, X86_FEATURE_MCE))
22372 return;
22373
22374+ pax_open_kernel();
22375 machine_check_vector = pentium_machine_check;
22376+ pax_close_kernel();
22377 /* Make sure the vector pointer is visible before we enable MCEs: */
22378 wmb();
22379
22380diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
22381index 7dc5564..1273569 100644
22382--- a/arch/x86/kernel/cpu/mcheck/winchip.c
22383+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
22384@@ -9,6 +9,7 @@
22385 #include <asm/processor.h>
22386 #include <asm/mce.h>
22387 #include <asm/msr.h>
22388+#include <asm/pgtable.h>
22389
22390 /* Machine check handler for WinChip C6: */
22391 static void winchip_machine_check(struct pt_regs *regs, long error_code)
22392@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
22393 {
22394 u32 lo, hi;
22395
22396+ pax_open_kernel();
22397 machine_check_vector = winchip_machine_check;
22398+ pax_close_kernel();
22399 /* Make sure the vector pointer is visible before we enable MCEs: */
22400 wmb();
22401
22402diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
22403index dd9d619..86e1d81 100644
22404--- a/arch/x86/kernel/cpu/microcode/core.c
22405+++ b/arch/x86/kernel/cpu/microcode/core.c
22406@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
22407 return NOTIFY_OK;
22408 }
22409
22410-static struct notifier_block __refdata mc_cpu_notifier = {
22411+static struct notifier_block mc_cpu_notifier = {
22412 .notifier_call = mc_cpu_callback,
22413 };
22414
22415diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
22416index a276fa7..e66810f 100644
22417--- a/arch/x86/kernel/cpu/microcode/intel.c
22418+++ b/arch/x86/kernel/cpu/microcode/intel.c
22419@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
22420
22421 static int get_ucode_user(void *to, const void *from, size_t n)
22422 {
22423- return copy_from_user(to, from, n);
22424+ return copy_from_user(to, (const void __force_user *)from, n);
22425 }
22426
22427 static enum ucode_state
22428 request_microcode_user(int cpu, const void __user *buf, size_t size)
22429 {
22430- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
22431+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
22432 }
22433
22434 static void microcode_fini_cpu(int cpu)
22435diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
22436index f961de9..8a9d332 100644
22437--- a/arch/x86/kernel/cpu/mtrr/main.c
22438+++ b/arch/x86/kernel/cpu/mtrr/main.c
22439@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
22440 u64 size_or_mask, size_and_mask;
22441 static bool mtrr_aps_delayed_init;
22442
22443-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
22444+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
22445
22446 const struct mtrr_ops *mtrr_if;
22447
22448diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
22449index df5e41f..816c719 100644
22450--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
22451+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
22452@@ -25,7 +25,7 @@ struct mtrr_ops {
22453 int (*validate_add_page)(unsigned long base, unsigned long size,
22454 unsigned int type);
22455 int (*have_wrcomb)(void);
22456-};
22457+} __do_const;
22458
22459 extern int generic_get_free_region(unsigned long base, unsigned long size,
22460 int replace_reg);
22461diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
22462index 2879ecd..bb8c80b 100644
22463--- a/arch/x86/kernel/cpu/perf_event.c
22464+++ b/arch/x86/kernel/cpu/perf_event.c
22465@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
22466
22467 }
22468
22469-static struct attribute_group x86_pmu_format_group = {
22470+static attribute_group_no_const x86_pmu_format_group = {
22471 .name = "format",
22472 .attrs = NULL,
22473 };
22474@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
22475 NULL,
22476 };
22477
22478-static struct attribute_group x86_pmu_events_group = {
22479+static attribute_group_no_const x86_pmu_events_group = {
22480 .name = "events",
22481 .attrs = events_attr,
22482 };
22483@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
22484 if (idx > GDT_ENTRIES)
22485 return 0;
22486
22487- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
22488+ desc = get_cpu_gdt_table(smp_processor_id());
22489 }
22490
22491 return get_desc_base(desc + idx);
22492@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
22493 break;
22494
22495 perf_callchain_store(entry, frame.return_address);
22496- fp = frame.next_frame;
22497+ fp = (const void __force_user *)frame.next_frame;
22498 }
22499 }
22500
22501diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22502index 639d128..e92d7e5 100644
22503--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22504+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22505@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
22506 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
22507 {
22508 struct attribute **attrs;
22509- struct attribute_group *attr_group;
22510+ attribute_group_no_const *attr_group;
22511 int i = 0, j;
22512
22513 while (amd_iommu_v2_event_descs[i].attr.attr.name)
22514diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
22515index 2502d0d..e5cc05c 100644
22516--- a/arch/x86/kernel/cpu/perf_event_intel.c
22517+++ b/arch/x86/kernel/cpu/perf_event_intel.c
22518@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22519 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22520
22521 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22522- u64 capabilities;
22523+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22524
22525- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22526- x86_pmu.intel_cap.capabilities = capabilities;
22527+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22528+ x86_pmu.intel_cap.capabilities = capabilities;
22529 }
22530
22531 intel_ds_init();
22532diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22533index 619f769..d510008 100644
22534--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22535+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22536@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22537 NULL,
22538 };
22539
22540-static struct attribute_group rapl_pmu_events_group = {
22541+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22542 .name = "events",
22543 .attrs = NULL, /* patched at runtime */
22544 };
22545diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22546index 0939f86..69730af 100644
22547--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22548+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22549@@ -3691,7 +3691,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22550 static int __init uncore_type_init(struct intel_uncore_type *type)
22551 {
22552 struct intel_uncore_pmu *pmus;
22553- struct attribute_group *attr_group;
22554+ attribute_group_no_const *attr_group;
22555 struct attribute **attrs;
22556 int i, j;
22557
22558diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22559index 90236f0..54cb20d 100644
22560--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22561+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22562@@ -503,7 +503,7 @@ struct intel_uncore_box {
22563 struct uncore_event_desc {
22564 struct kobj_attribute attr;
22565 const char *config;
22566-};
22567+} __do_const;
22568
22569 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22570 { \
22571diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22572index 3225ae6c..ee3c6db 100644
22573--- a/arch/x86/kernel/cpuid.c
22574+++ b/arch/x86/kernel/cpuid.c
22575@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22576 return notifier_from_errno(err);
22577 }
22578
22579-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22580+static struct notifier_block cpuid_class_cpu_notifier =
22581 {
22582 .notifier_call = cpuid_class_cpu_callback,
22583 };
22584diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22585index a618fcd..200e95b 100644
22586--- a/arch/x86/kernel/crash.c
22587+++ b/arch/x86/kernel/crash.c
22588@@ -104,7 +104,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22589 #ifdef CONFIG_X86_32
22590 struct pt_regs fixed_regs;
22591
22592- if (!user_mode_vm(regs)) {
22593+ if (!user_mode(regs)) {
22594 crash_fixup_ss_esp(&fixed_regs, regs);
22595 regs = &fixed_regs;
22596 }
22597diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22598index afa64ad..dce67dd 100644
22599--- a/arch/x86/kernel/crash_dump_64.c
22600+++ b/arch/x86/kernel/crash_dump_64.c
22601@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22602 return -ENOMEM;
22603
22604 if (userbuf) {
22605- if (copy_to_user(buf, vaddr + offset, csize)) {
22606+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22607 iounmap(vaddr);
22608 return -EFAULT;
22609 }
22610diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22611index f6dfd93..892ade4 100644
22612--- a/arch/x86/kernel/doublefault.c
22613+++ b/arch/x86/kernel/doublefault.c
22614@@ -12,7 +12,7 @@
22615
22616 #define DOUBLEFAULT_STACKSIZE (1024)
22617 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22618-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22619+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22620
22621 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22622
22623@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22624 unsigned long gdt, tss;
22625
22626 native_store_gdt(&gdt_desc);
22627- gdt = gdt_desc.address;
22628+ gdt = (unsigned long)gdt_desc.address;
22629
22630 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22631
22632@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22633 /* 0x2 bit is always set */
22634 .flags = X86_EFLAGS_SF | 0x2,
22635 .sp = STACK_START,
22636- .es = __USER_DS,
22637+ .es = __KERNEL_DS,
22638 .cs = __KERNEL_CS,
22639 .ss = __KERNEL_DS,
22640- .ds = __USER_DS,
22641+ .ds = __KERNEL_DS,
22642 .fs = __KERNEL_PERCPU,
22643
22644 .__cr3 = __pa_nodebug(swapper_pg_dir),
22645diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22646index b74ebc7..2c95874 100644
22647--- a/arch/x86/kernel/dumpstack.c
22648+++ b/arch/x86/kernel/dumpstack.c
22649@@ -2,6 +2,9 @@
22650 * Copyright (C) 1991, 1992 Linus Torvalds
22651 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22652 */
22653+#ifdef CONFIG_GRKERNSEC_HIDESYM
22654+#define __INCLUDED_BY_HIDESYM 1
22655+#endif
22656 #include <linux/kallsyms.h>
22657 #include <linux/kprobes.h>
22658 #include <linux/uaccess.h>
22659@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22660
22661 void printk_address(unsigned long address)
22662 {
22663- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22664+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22665 }
22666
22667 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22668 static void
22669 print_ftrace_graph_addr(unsigned long addr, void *data,
22670 const struct stacktrace_ops *ops,
22671- struct thread_info *tinfo, int *graph)
22672+ struct task_struct *task, int *graph)
22673 {
22674- struct task_struct *task;
22675 unsigned long ret_addr;
22676 int index;
22677
22678 if (addr != (unsigned long)return_to_handler)
22679 return;
22680
22681- task = tinfo->task;
22682 index = task->curr_ret_stack;
22683
22684 if (!task->ret_stack || index < *graph)
22685@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22686 static inline void
22687 print_ftrace_graph_addr(unsigned long addr, void *data,
22688 const struct stacktrace_ops *ops,
22689- struct thread_info *tinfo, int *graph)
22690+ struct task_struct *task, int *graph)
22691 { }
22692 #endif
22693
22694@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22695 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22696 */
22697
22698-static inline int valid_stack_ptr(struct thread_info *tinfo,
22699- void *p, unsigned int size, void *end)
22700+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22701 {
22702- void *t = tinfo;
22703 if (end) {
22704 if (p < end && p >= (end-THREAD_SIZE))
22705 return 1;
22706@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22707 }
22708
22709 unsigned long
22710-print_context_stack(struct thread_info *tinfo,
22711+print_context_stack(struct task_struct *task, void *stack_start,
22712 unsigned long *stack, unsigned long bp,
22713 const struct stacktrace_ops *ops, void *data,
22714 unsigned long *end, int *graph)
22715 {
22716 struct stack_frame *frame = (struct stack_frame *)bp;
22717
22718- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22719+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22720 unsigned long addr;
22721
22722 addr = *stack;
22723@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22724 } else {
22725 ops->address(data, addr, 0);
22726 }
22727- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22728+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22729 }
22730 stack++;
22731 }
22732@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22733 EXPORT_SYMBOL_GPL(print_context_stack);
22734
22735 unsigned long
22736-print_context_stack_bp(struct thread_info *tinfo,
22737+print_context_stack_bp(struct task_struct *task, void *stack_start,
22738 unsigned long *stack, unsigned long bp,
22739 const struct stacktrace_ops *ops, void *data,
22740 unsigned long *end, int *graph)
22741@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22742 struct stack_frame *frame = (struct stack_frame *)bp;
22743 unsigned long *ret_addr = &frame->return_address;
22744
22745- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22746+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22747 unsigned long addr = *ret_addr;
22748
22749 if (!__kernel_text_address(addr))
22750@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22751 ops->address(data, addr, 1);
22752 frame = frame->next_frame;
22753 ret_addr = &frame->return_address;
22754- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22755+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22756 }
22757
22758 return (unsigned long)frame;
22759@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22760 static void print_trace_address(void *data, unsigned long addr, int reliable)
22761 {
22762 touch_nmi_watchdog();
22763- printk(data);
22764+ printk("%s", (char *)data);
22765 printk_stack_address(addr, reliable);
22766 }
22767
22768@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22769 EXPORT_SYMBOL_GPL(oops_begin);
22770 NOKPROBE_SYMBOL(oops_begin);
22771
22772+extern void gr_handle_kernel_exploit(void);
22773+
22774 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22775 {
22776 if (regs && kexec_should_crash(current))
22777@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22778 panic("Fatal exception in interrupt");
22779 if (panic_on_oops)
22780 panic("Fatal exception");
22781- do_exit(signr);
22782+
22783+ gr_handle_kernel_exploit();
22784+
22785+ do_group_exit(signr);
22786 }
22787 NOKPROBE_SYMBOL(oops_end);
22788
22789@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22790 print_modules();
22791 show_regs(regs);
22792 #ifdef CONFIG_X86_32
22793- if (user_mode_vm(regs)) {
22794+ if (user_mode(regs)) {
22795 sp = regs->sp;
22796 ss = regs->ss & 0xffff;
22797 } else {
22798@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22799 unsigned long flags = oops_begin();
22800 int sig = SIGSEGV;
22801
22802- if (!user_mode_vm(regs))
22803+ if (!user_mode(regs))
22804 report_bug(regs->ip, regs);
22805
22806 if (__die(str, regs, err))
22807diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22808index 5abd4cd..c65733b 100644
22809--- a/arch/x86/kernel/dumpstack_32.c
22810+++ b/arch/x86/kernel/dumpstack_32.c
22811@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22812 bp = stack_frame(task, regs);
22813
22814 for (;;) {
22815- struct thread_info *context;
22816+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22817 void *end_stack;
22818
22819 end_stack = is_hardirq_stack(stack, cpu);
22820 if (!end_stack)
22821 end_stack = is_softirq_stack(stack, cpu);
22822
22823- context = task_thread_info(task);
22824- bp = ops->walk_stack(context, stack, bp, ops, data,
22825+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22826 end_stack, &graph);
22827
22828 /* Stop if not on irq stack */
22829@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22830 int i;
22831
22832 show_regs_print_info(KERN_EMERG);
22833- __show_regs(regs, !user_mode_vm(regs));
22834+ __show_regs(regs, !user_mode(regs));
22835
22836 /*
22837 * When in-kernel, we also print out the stack and code at the
22838 * time of the fault..
22839 */
22840- if (!user_mode_vm(regs)) {
22841+ if (!user_mode(regs)) {
22842 unsigned int code_prologue = code_bytes * 43 / 64;
22843 unsigned int code_len = code_bytes;
22844 unsigned char c;
22845 u8 *ip;
22846+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22847
22848 pr_emerg("Stack:\n");
22849 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22850
22851 pr_emerg("Code:");
22852
22853- ip = (u8 *)regs->ip - code_prologue;
22854+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22855 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22856 /* try starting at IP */
22857- ip = (u8 *)regs->ip;
22858+ ip = (u8 *)regs->ip + cs_base;
22859 code_len = code_len - code_prologue + 1;
22860 }
22861 for (i = 0; i < code_len; i++, ip++) {
22862@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22863 pr_cont(" Bad EIP value.");
22864 break;
22865 }
22866- if (ip == (u8 *)regs->ip)
22867+ if (ip == (u8 *)regs->ip + cs_base)
22868 pr_cont(" <%02x>", c);
22869 else
22870 pr_cont(" %02x", c);
22871@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22872 {
22873 unsigned short ud2;
22874
22875+ ip = ktla_ktva(ip);
22876 if (ip < PAGE_OFFSET)
22877 return 0;
22878 if (probe_kernel_address((unsigned short *)ip, ud2))
22879@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22880
22881 return ud2 == 0x0b0f;
22882 }
22883+
22884+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22885+void pax_check_alloca(unsigned long size)
22886+{
22887+ unsigned long sp = (unsigned long)&sp, stack_left;
22888+
22889+ /* all kernel stacks are of the same size */
22890+ stack_left = sp & (THREAD_SIZE - 1);
22891+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22892+}
22893+EXPORT_SYMBOL(pax_check_alloca);
22894+#endif
22895diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22896index 1abcb50..6c8d702 100644
22897--- a/arch/x86/kernel/dumpstack_64.c
22898+++ b/arch/x86/kernel/dumpstack_64.c
22899@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22900 const struct stacktrace_ops *ops, void *data)
22901 {
22902 const unsigned cpu = get_cpu();
22903- struct thread_info *tinfo;
22904 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22905 unsigned long dummy;
22906 unsigned used = 0;
22907 int graph = 0;
22908 int done = 0;
22909+ void *stack_start;
22910
22911 if (!task)
22912 task = current;
22913@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22914 * current stack address. If the stacks consist of nested
22915 * exceptions
22916 */
22917- tinfo = task_thread_info(task);
22918 while (!done) {
22919 unsigned long *stack_end;
22920 enum stack_type stype;
22921@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22922 if (ops->stack(data, id) < 0)
22923 break;
22924
22925- bp = ops->walk_stack(tinfo, stack, bp, ops,
22926+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22927 data, stack_end, &graph);
22928 ops->stack(data, "<EOE>");
22929 /*
22930@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22931 * second-to-last pointer (index -2 to end) in the
22932 * exception stack:
22933 */
22934+ if ((u16)stack_end[-1] != __KERNEL_DS)
22935+ goto out;
22936 stack = (unsigned long *) stack_end[-2];
22937 done = 0;
22938 break;
22939@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22940
22941 if (ops->stack(data, "IRQ") < 0)
22942 break;
22943- bp = ops->walk_stack(tinfo, stack, bp,
22944+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22945 ops, data, stack_end, &graph);
22946 /*
22947 * We link to the next stack (which would be
22948@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22949 /*
22950 * This handles the process stack:
22951 */
22952- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22953+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22954+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22955+out:
22956 put_cpu();
22957 }
22958 EXPORT_SYMBOL(dump_trace);
22959@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22960
22961 return ud2 == 0x0b0f;
22962 }
22963+
22964+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22965+void pax_check_alloca(unsigned long size)
22966+{
22967+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22968+ unsigned cpu, used;
22969+ char *id;
22970+
22971+ /* check the process stack first */
22972+ stack_start = (unsigned long)task_stack_page(current);
22973+ stack_end = stack_start + THREAD_SIZE;
22974+ if (likely(stack_start <= sp && sp < stack_end)) {
22975+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22976+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22977+ return;
22978+ }
22979+
22980+ cpu = get_cpu();
22981+
22982+ /* check the irq stacks */
22983+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22984+ stack_start = stack_end - IRQ_STACK_SIZE;
22985+ if (stack_start <= sp && sp < stack_end) {
22986+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22987+ put_cpu();
22988+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22989+ return;
22990+ }
22991+
22992+ /* check the exception stacks */
22993+ used = 0;
22994+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22995+ stack_start = stack_end - EXCEPTION_STKSZ;
22996+ if (stack_end && stack_start <= sp && sp < stack_end) {
22997+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22998+ put_cpu();
22999+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
23000+ return;
23001+ }
23002+
23003+ put_cpu();
23004+
23005+ /* unknown stack */
23006+ BUG();
23007+}
23008+EXPORT_SYMBOL(pax_check_alloca);
23009+#endif
23010diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
23011index 988c00a..4f673b6 100644
23012--- a/arch/x86/kernel/e820.c
23013+++ b/arch/x86/kernel/e820.c
23014@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
23015
23016 static void early_panic(char *msg)
23017 {
23018- early_printk(msg);
23019- panic(msg);
23020+ early_printk("%s", msg);
23021+ panic("%s", msg);
23022 }
23023
23024 static int userdef __initdata;
23025diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
23026index 01d1c18..8073693 100644
23027--- a/arch/x86/kernel/early_printk.c
23028+++ b/arch/x86/kernel/early_printk.c
23029@@ -7,6 +7,7 @@
23030 #include <linux/pci_regs.h>
23031 #include <linux/pci_ids.h>
23032 #include <linux/errno.h>
23033+#include <linux/sched.h>
23034 #include <asm/io.h>
23035 #include <asm/processor.h>
23036 #include <asm/fcntl.h>
23037diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
23038index 4b0e1df..884b67e 100644
23039--- a/arch/x86/kernel/entry_32.S
23040+++ b/arch/x86/kernel/entry_32.S
23041@@ -177,13 +177,153 @@
23042 /*CFI_REL_OFFSET gs, PT_GS*/
23043 .endm
23044 .macro SET_KERNEL_GS reg
23045+
23046+#ifdef CONFIG_CC_STACKPROTECTOR
23047 movl $(__KERNEL_STACK_CANARY), \reg
23048+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23049+ movl $(__USER_DS), \reg
23050+#else
23051+ xorl \reg, \reg
23052+#endif
23053+
23054 movl \reg, %gs
23055 .endm
23056
23057 #endif /* CONFIG_X86_32_LAZY_GS */
23058
23059-.macro SAVE_ALL
23060+.macro pax_enter_kernel
23061+#ifdef CONFIG_PAX_KERNEXEC
23062+ call pax_enter_kernel
23063+#endif
23064+.endm
23065+
23066+.macro pax_exit_kernel
23067+#ifdef CONFIG_PAX_KERNEXEC
23068+ call pax_exit_kernel
23069+#endif
23070+.endm
23071+
23072+#ifdef CONFIG_PAX_KERNEXEC
23073+ENTRY(pax_enter_kernel)
23074+#ifdef CONFIG_PARAVIRT
23075+ pushl %eax
23076+ pushl %ecx
23077+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
23078+ mov %eax, %esi
23079+#else
23080+ mov %cr0, %esi
23081+#endif
23082+ bts $16, %esi
23083+ jnc 1f
23084+ mov %cs, %esi
23085+ cmp $__KERNEL_CS, %esi
23086+ jz 3f
23087+ ljmp $__KERNEL_CS, $3f
23088+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
23089+2:
23090+#ifdef CONFIG_PARAVIRT
23091+ mov %esi, %eax
23092+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
23093+#else
23094+ mov %esi, %cr0
23095+#endif
23096+3:
23097+#ifdef CONFIG_PARAVIRT
23098+ popl %ecx
23099+ popl %eax
23100+#endif
23101+ ret
23102+ENDPROC(pax_enter_kernel)
23103+
23104+ENTRY(pax_exit_kernel)
23105+#ifdef CONFIG_PARAVIRT
23106+ pushl %eax
23107+ pushl %ecx
23108+#endif
23109+ mov %cs, %esi
23110+ cmp $__KERNEXEC_KERNEL_CS, %esi
23111+ jnz 2f
23112+#ifdef CONFIG_PARAVIRT
23113+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
23114+ mov %eax, %esi
23115+#else
23116+ mov %cr0, %esi
23117+#endif
23118+ btr $16, %esi
23119+ ljmp $__KERNEL_CS, $1f
23120+1:
23121+#ifdef CONFIG_PARAVIRT
23122+ mov %esi, %eax
23123+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
23124+#else
23125+ mov %esi, %cr0
23126+#endif
23127+2:
23128+#ifdef CONFIG_PARAVIRT
23129+ popl %ecx
23130+ popl %eax
23131+#endif
23132+ ret
23133+ENDPROC(pax_exit_kernel)
23134+#endif
23135+
23136+ .macro pax_erase_kstack
23137+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23138+ call pax_erase_kstack
23139+#endif
23140+ .endm
23141+
23142+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23143+/*
23144+ * ebp: thread_info
23145+ */
23146+ENTRY(pax_erase_kstack)
23147+ pushl %edi
23148+ pushl %ecx
23149+ pushl %eax
23150+
23151+ mov TI_lowest_stack(%ebp), %edi
23152+ mov $-0xBEEF, %eax
23153+ std
23154+
23155+1: mov %edi, %ecx
23156+ and $THREAD_SIZE_asm - 1, %ecx
23157+ shr $2, %ecx
23158+ repne scasl
23159+ jecxz 2f
23160+
23161+ cmp $2*16, %ecx
23162+ jc 2f
23163+
23164+ mov $2*16, %ecx
23165+ repe scasl
23166+ jecxz 2f
23167+ jne 1b
23168+
23169+2: cld
23170+ mov %esp, %ecx
23171+ sub %edi, %ecx
23172+
23173+ cmp $THREAD_SIZE_asm, %ecx
23174+ jb 3f
23175+ ud2
23176+3:
23177+
23178+ shr $2, %ecx
23179+ rep stosl
23180+
23181+ mov TI_task_thread_sp0(%ebp), %edi
23182+ sub $128, %edi
23183+ mov %edi, TI_lowest_stack(%ebp)
23184+
23185+ popl %eax
23186+ popl %ecx
23187+ popl %edi
23188+ ret
23189+ENDPROC(pax_erase_kstack)
23190+#endif
23191+
23192+.macro __SAVE_ALL _DS
23193 cld
23194 PUSH_GS
23195 pushl_cfi %fs
23196@@ -206,7 +346,7 @@
23197 CFI_REL_OFFSET ecx, 0
23198 pushl_cfi %ebx
23199 CFI_REL_OFFSET ebx, 0
23200- movl $(__USER_DS), %edx
23201+ movl $\_DS, %edx
23202 movl %edx, %ds
23203 movl %edx, %es
23204 movl $(__KERNEL_PERCPU), %edx
23205@@ -214,6 +354,15 @@
23206 SET_KERNEL_GS %edx
23207 .endm
23208
23209+.macro SAVE_ALL
23210+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23211+ __SAVE_ALL __KERNEL_DS
23212+ pax_enter_kernel
23213+#else
23214+ __SAVE_ALL __USER_DS
23215+#endif
23216+.endm
23217+
23218 .macro RESTORE_INT_REGS
23219 popl_cfi %ebx
23220 CFI_RESTORE ebx
23221@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
23222 popfl_cfi
23223 jmp syscall_exit
23224 CFI_ENDPROC
23225-END(ret_from_fork)
23226+ENDPROC(ret_from_fork)
23227
23228 ENTRY(ret_from_kernel_thread)
23229 CFI_STARTPROC
23230@@ -340,7 +489,15 @@ ret_from_intr:
23231 andl $SEGMENT_RPL_MASK, %eax
23232 #endif
23233 cmpl $USER_RPL, %eax
23234+
23235+#ifdef CONFIG_PAX_KERNEXEC
23236+ jae resume_userspace
23237+
23238+ pax_exit_kernel
23239+ jmp resume_kernel
23240+#else
23241 jb resume_kernel # not returning to v8086 or userspace
23242+#endif
23243
23244 ENTRY(resume_userspace)
23245 LOCKDEP_SYS_EXIT
23246@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
23247 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
23248 # int/exception return?
23249 jne work_pending
23250- jmp restore_all
23251-END(ret_from_exception)
23252+ jmp restore_all_pax
23253+ENDPROC(ret_from_exception)
23254
23255 #ifdef CONFIG_PREEMPT
23256 ENTRY(resume_kernel)
23257@@ -365,7 +522,7 @@ need_resched:
23258 jz restore_all
23259 call preempt_schedule_irq
23260 jmp need_resched
23261-END(resume_kernel)
23262+ENDPROC(resume_kernel)
23263 #endif
23264 CFI_ENDPROC
23265
23266@@ -395,30 +552,45 @@ sysenter_past_esp:
23267 /*CFI_REL_OFFSET cs, 0*/
23268 /*
23269 * Push current_thread_info()->sysenter_return to the stack.
23270- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
23271- * pushed above; +8 corresponds to copy_thread's esp0 setting.
23272 */
23273- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
23274+ pushl_cfi $0
23275 CFI_REL_OFFSET eip, 0
23276
23277 pushl_cfi %eax
23278 SAVE_ALL
23279+ GET_THREAD_INFO(%ebp)
23280+ movl TI_sysenter_return(%ebp),%ebp
23281+ movl %ebp,PT_EIP(%esp)
23282 ENABLE_INTERRUPTS(CLBR_NONE)
23283
23284 /*
23285 * Load the potential sixth argument from user stack.
23286 * Careful about security.
23287 */
23288+ movl PT_OLDESP(%esp),%ebp
23289+
23290+#ifdef CONFIG_PAX_MEMORY_UDEREF
23291+ mov PT_OLDSS(%esp),%ds
23292+1: movl %ds:(%ebp),%ebp
23293+ push %ss
23294+ pop %ds
23295+#else
23296 cmpl $__PAGE_OFFSET-3,%ebp
23297 jae syscall_fault
23298 ASM_STAC
23299 1: movl (%ebp),%ebp
23300 ASM_CLAC
23301+#endif
23302+
23303 movl %ebp,PT_EBP(%esp)
23304 _ASM_EXTABLE(1b,syscall_fault)
23305
23306 GET_THREAD_INFO(%ebp)
23307
23308+#ifdef CONFIG_PAX_RANDKSTACK
23309+ pax_erase_kstack
23310+#endif
23311+
23312 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23313 jnz sysenter_audit
23314 sysenter_do_call:
23315@@ -434,12 +606,24 @@ sysenter_after_call:
23316 testl $_TIF_ALLWORK_MASK, %ecx
23317 jne sysexit_audit
23318 sysenter_exit:
23319+
23320+#ifdef CONFIG_PAX_RANDKSTACK
23321+ pushl_cfi %eax
23322+ movl %esp, %eax
23323+ call pax_randomize_kstack
23324+ popl_cfi %eax
23325+#endif
23326+
23327+ pax_erase_kstack
23328+
23329 /* if something modifies registers it must also disable sysexit */
23330 movl PT_EIP(%esp), %edx
23331 movl PT_OLDESP(%esp), %ecx
23332 xorl %ebp,%ebp
23333 TRACE_IRQS_ON
23334 1: mov PT_FS(%esp), %fs
23335+2: mov PT_DS(%esp), %ds
23336+3: mov PT_ES(%esp), %es
23337 PTGS_TO_GS
23338 ENABLE_INTERRUPTS_SYSEXIT
23339
23340@@ -456,6 +640,9 @@ sysenter_audit:
23341 movl %eax,%edx /* 2nd arg: syscall number */
23342 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
23343 call __audit_syscall_entry
23344+
23345+ pax_erase_kstack
23346+
23347 pushl_cfi %ebx
23348 movl PT_EAX(%esp),%eax /* reload syscall number */
23349 jmp sysenter_do_call
23350@@ -481,10 +668,16 @@ sysexit_audit:
23351
23352 CFI_ENDPROC
23353 .pushsection .fixup,"ax"
23354-2: movl $0,PT_FS(%esp)
23355+4: movl $0,PT_FS(%esp)
23356+ jmp 1b
23357+5: movl $0,PT_DS(%esp)
23358+ jmp 1b
23359+6: movl $0,PT_ES(%esp)
23360 jmp 1b
23361 .popsection
23362- _ASM_EXTABLE(1b,2b)
23363+ _ASM_EXTABLE(1b,4b)
23364+ _ASM_EXTABLE(2b,5b)
23365+ _ASM_EXTABLE(3b,6b)
23366 PTGS_TO_GS_EX
23367 ENDPROC(ia32_sysenter_target)
23368
23369@@ -495,6 +688,11 @@ ENTRY(system_call)
23370 pushl_cfi %eax # save orig_eax
23371 SAVE_ALL
23372 GET_THREAD_INFO(%ebp)
23373+
23374+#ifdef CONFIG_PAX_RANDKSTACK
23375+ pax_erase_kstack
23376+#endif
23377+
23378 # system call tracing in operation / emulation
23379 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23380 jnz syscall_trace_entry
23381@@ -514,6 +712,15 @@ syscall_exit:
23382 testl $_TIF_ALLWORK_MASK, %ecx # current->work
23383 jne syscall_exit_work
23384
23385+restore_all_pax:
23386+
23387+#ifdef CONFIG_PAX_RANDKSTACK
23388+ movl %esp, %eax
23389+ call pax_randomize_kstack
23390+#endif
23391+
23392+ pax_erase_kstack
23393+
23394 restore_all:
23395 TRACE_IRQS_IRET
23396 restore_all_notrace:
23397@@ -568,14 +775,34 @@ ldt_ss:
23398 * compensating for the offset by changing to the ESPFIX segment with
23399 * a base address that matches for the difference.
23400 */
23401-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
23402+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
23403 mov %esp, %edx /* load kernel esp */
23404 mov PT_OLDESP(%esp), %eax /* load userspace esp */
23405 mov %dx, %ax /* eax: new kernel esp */
23406 sub %eax, %edx /* offset (low word is 0) */
23407+#ifdef CONFIG_SMP
23408+ movl PER_CPU_VAR(cpu_number), %ebx
23409+ shll $PAGE_SHIFT_asm, %ebx
23410+ addl $cpu_gdt_table, %ebx
23411+#else
23412+ movl $cpu_gdt_table, %ebx
23413+#endif
23414 shr $16, %edx
23415- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
23416- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
23417+
23418+#ifdef CONFIG_PAX_KERNEXEC
23419+ mov %cr0, %esi
23420+ btr $16, %esi
23421+ mov %esi, %cr0
23422+#endif
23423+
23424+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
23425+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
23426+
23427+#ifdef CONFIG_PAX_KERNEXEC
23428+ bts $16, %esi
23429+ mov %esi, %cr0
23430+#endif
23431+
23432 pushl_cfi $__ESPFIX_SS
23433 pushl_cfi %eax /* new kernel esp */
23434 /* Disable interrupts, but do not irqtrace this section: we
23435@@ -605,20 +832,18 @@ work_resched:
23436 movl TI_flags(%ebp), %ecx
23437 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
23438 # than syscall tracing?
23439- jz restore_all
23440+ jz restore_all_pax
23441 testb $_TIF_NEED_RESCHED, %cl
23442 jnz work_resched
23443
23444 work_notifysig: # deal with pending signals and
23445 # notify-resume requests
23446+ movl %esp, %eax
23447 #ifdef CONFIG_VM86
23448 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
23449- movl %esp, %eax
23450 jne work_notifysig_v86 # returning to kernel-space or
23451 # vm86-space
23452 1:
23453-#else
23454- movl %esp, %eax
23455 #endif
23456 TRACE_IRQS_ON
23457 ENABLE_INTERRUPTS(CLBR_NONE)
23458@@ -639,7 +864,7 @@ work_notifysig_v86:
23459 movl %eax, %esp
23460 jmp 1b
23461 #endif
23462-END(work_pending)
23463+ENDPROC(work_pending)
23464
23465 # perform syscall exit tracing
23466 ALIGN
23467@@ -647,11 +872,14 @@ syscall_trace_entry:
23468 movl $-ENOSYS,PT_EAX(%esp)
23469 movl %esp, %eax
23470 call syscall_trace_enter
23471+
23472+ pax_erase_kstack
23473+
23474 /* What it returned is what we'll actually use. */
23475 cmpl $(NR_syscalls), %eax
23476 jnae syscall_call
23477 jmp syscall_exit
23478-END(syscall_trace_entry)
23479+ENDPROC(syscall_trace_entry)
23480
23481 # perform syscall exit tracing
23482 ALIGN
23483@@ -664,26 +892,30 @@ syscall_exit_work:
23484 movl %esp, %eax
23485 call syscall_trace_leave
23486 jmp resume_userspace
23487-END(syscall_exit_work)
23488+ENDPROC(syscall_exit_work)
23489 CFI_ENDPROC
23490
23491 RING0_INT_FRAME # can't unwind into user space anyway
23492 syscall_fault:
23493+#ifdef CONFIG_PAX_MEMORY_UDEREF
23494+ push %ss
23495+ pop %ds
23496+#endif
23497 ASM_CLAC
23498 GET_THREAD_INFO(%ebp)
23499 movl $-EFAULT,PT_EAX(%esp)
23500 jmp resume_userspace
23501-END(syscall_fault)
23502+ENDPROC(syscall_fault)
23503
23504 syscall_badsys:
23505 movl $-ENOSYS,%eax
23506 jmp syscall_after_call
23507-END(syscall_badsys)
23508+ENDPROC(syscall_badsys)
23509
23510 sysenter_badsys:
23511 movl $-ENOSYS,%eax
23512 jmp sysenter_after_call
23513-END(sysenter_badsys)
23514+ENDPROC(sysenter_badsys)
23515 CFI_ENDPROC
23516
23517 .macro FIXUP_ESPFIX_STACK
23518@@ -696,8 +928,15 @@ END(sysenter_badsys)
23519 */
23520 #ifdef CONFIG_X86_ESPFIX32
23521 /* fixup the stack */
23522- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
23523- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
23524+#ifdef CONFIG_SMP
23525+ movl PER_CPU_VAR(cpu_number), %ebx
23526+ shll $PAGE_SHIFT_asm, %ebx
23527+ addl $cpu_gdt_table, %ebx
23528+#else
23529+ movl $cpu_gdt_table, %ebx
23530+#endif
23531+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23532+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23533 shl $16, %eax
23534 addl %esp, %eax /* the adjusted stack pointer */
23535 pushl_cfi $__KERNEL_DS
23536@@ -753,7 +992,7 @@ vector=vector+1
23537 .endr
23538 2: jmp common_interrupt
23539 .endr
23540-END(irq_entries_start)
23541+ENDPROC(irq_entries_start)
23542
23543 .previous
23544 END(interrupt)
23545@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23546 pushl_cfi $do_coprocessor_error
23547 jmp error_code
23548 CFI_ENDPROC
23549-END(coprocessor_error)
23550+ENDPROC(coprocessor_error)
23551
23552 ENTRY(simd_coprocessor_error)
23553 RING0_INT_FRAME
23554@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23555 .section .altinstructions,"a"
23556 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23557 .previous
23558-.section .altinstr_replacement,"ax"
23559+.section .altinstr_replacement,"a"
23560 663: pushl $do_simd_coprocessor_error
23561 664:
23562 .previous
23563@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23564 #endif
23565 jmp error_code
23566 CFI_ENDPROC
23567-END(simd_coprocessor_error)
23568+ENDPROC(simd_coprocessor_error)
23569
23570 ENTRY(device_not_available)
23571 RING0_INT_FRAME
23572@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23573 pushl_cfi $do_device_not_available
23574 jmp error_code
23575 CFI_ENDPROC
23576-END(device_not_available)
23577+ENDPROC(device_not_available)
23578
23579 #ifdef CONFIG_PARAVIRT
23580 ENTRY(native_iret)
23581 iret
23582 _ASM_EXTABLE(native_iret, iret_exc)
23583-END(native_iret)
23584+ENDPROC(native_iret)
23585
23586 ENTRY(native_irq_enable_sysexit)
23587 sti
23588 sysexit
23589-END(native_irq_enable_sysexit)
23590+ENDPROC(native_irq_enable_sysexit)
23591 #endif
23592
23593 ENTRY(overflow)
23594@@ -862,7 +1101,7 @@ ENTRY(overflow)
23595 pushl_cfi $do_overflow
23596 jmp error_code
23597 CFI_ENDPROC
23598-END(overflow)
23599+ENDPROC(overflow)
23600
23601 ENTRY(bounds)
23602 RING0_INT_FRAME
23603@@ -871,7 +1110,7 @@ ENTRY(bounds)
23604 pushl_cfi $do_bounds
23605 jmp error_code
23606 CFI_ENDPROC
23607-END(bounds)
23608+ENDPROC(bounds)
23609
23610 ENTRY(invalid_op)
23611 RING0_INT_FRAME
23612@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23613 pushl_cfi $do_invalid_op
23614 jmp error_code
23615 CFI_ENDPROC
23616-END(invalid_op)
23617+ENDPROC(invalid_op)
23618
23619 ENTRY(coprocessor_segment_overrun)
23620 RING0_INT_FRAME
23621@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23622 pushl_cfi $do_coprocessor_segment_overrun
23623 jmp error_code
23624 CFI_ENDPROC
23625-END(coprocessor_segment_overrun)
23626+ENDPROC(coprocessor_segment_overrun)
23627
23628 ENTRY(invalid_TSS)
23629 RING0_EC_FRAME
23630@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23631 pushl_cfi $do_invalid_TSS
23632 jmp error_code
23633 CFI_ENDPROC
23634-END(invalid_TSS)
23635+ENDPROC(invalid_TSS)
23636
23637 ENTRY(segment_not_present)
23638 RING0_EC_FRAME
23639@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23640 pushl_cfi $do_segment_not_present
23641 jmp error_code
23642 CFI_ENDPROC
23643-END(segment_not_present)
23644+ENDPROC(segment_not_present)
23645
23646 ENTRY(stack_segment)
23647 RING0_EC_FRAME
23648@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23649 pushl_cfi $do_stack_segment
23650 jmp error_code
23651 CFI_ENDPROC
23652-END(stack_segment)
23653+ENDPROC(stack_segment)
23654
23655 ENTRY(alignment_check)
23656 RING0_EC_FRAME
23657@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23658 pushl_cfi $do_alignment_check
23659 jmp error_code
23660 CFI_ENDPROC
23661-END(alignment_check)
23662+ENDPROC(alignment_check)
23663
23664 ENTRY(divide_error)
23665 RING0_INT_FRAME
23666@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23667 pushl_cfi $do_divide_error
23668 jmp error_code
23669 CFI_ENDPROC
23670-END(divide_error)
23671+ENDPROC(divide_error)
23672
23673 #ifdef CONFIG_X86_MCE
23674 ENTRY(machine_check)
23675@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23676 pushl_cfi machine_check_vector
23677 jmp error_code
23678 CFI_ENDPROC
23679-END(machine_check)
23680+ENDPROC(machine_check)
23681 #endif
23682
23683 ENTRY(spurious_interrupt_bug)
23684@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23685 pushl_cfi $do_spurious_interrupt_bug
23686 jmp error_code
23687 CFI_ENDPROC
23688-END(spurious_interrupt_bug)
23689+ENDPROC(spurious_interrupt_bug)
23690
23691 #ifdef CONFIG_XEN
23692 /* Xen doesn't set %esp to be precisely what the normal sysenter
23693@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23694
23695 ENTRY(mcount)
23696 ret
23697-END(mcount)
23698+ENDPROC(mcount)
23699
23700 ENTRY(ftrace_caller)
23701 pushl %eax
23702@@ -1086,7 +1325,7 @@ ftrace_graph_call:
23703 .globl ftrace_stub
23704 ftrace_stub:
23705 ret
23706-END(ftrace_caller)
23707+ENDPROC(ftrace_caller)
23708
23709 ENTRY(ftrace_regs_caller)
23710 pushf /* push flags before compare (in cs location) */
23711@@ -1184,7 +1423,7 @@ trace:
23712 popl %ecx
23713 popl %eax
23714 jmp ftrace_stub
23715-END(mcount)
23716+ENDPROC(mcount)
23717 #endif /* CONFIG_DYNAMIC_FTRACE */
23718 #endif /* CONFIG_FUNCTION_TRACER */
23719
23720@@ -1202,7 +1441,7 @@ ENTRY(ftrace_graph_caller)
23721 popl %ecx
23722 popl %eax
23723 ret
23724-END(ftrace_graph_caller)
23725+ENDPROC(ftrace_graph_caller)
23726
23727 .globl return_to_handler
23728 return_to_handler:
23729@@ -1263,15 +1502,18 @@ error_code:
23730 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23731 REG_TO_PTGS %ecx
23732 SET_KERNEL_GS %ecx
23733- movl $(__USER_DS), %ecx
23734+ movl $(__KERNEL_DS), %ecx
23735 movl %ecx, %ds
23736 movl %ecx, %es
23737+
23738+ pax_enter_kernel
23739+
23740 TRACE_IRQS_OFF
23741 movl %esp,%eax # pt_regs pointer
23742 call *%edi
23743 jmp ret_from_exception
23744 CFI_ENDPROC
23745-END(page_fault)
23746+ENDPROC(page_fault)
23747
23748 /*
23749 * Debug traps and NMI can happen at the one SYSENTER instruction
23750@@ -1314,7 +1556,7 @@ debug_stack_correct:
23751 call do_debug
23752 jmp ret_from_exception
23753 CFI_ENDPROC
23754-END(debug)
23755+ENDPROC(debug)
23756
23757 /*
23758 * NMI is doubly nasty. It can happen _while_ we're handling
23759@@ -1354,6 +1596,9 @@ nmi_stack_correct:
23760 xorl %edx,%edx # zero error code
23761 movl %esp,%eax # pt_regs pointer
23762 call do_nmi
23763+
23764+ pax_exit_kernel
23765+
23766 jmp restore_all_notrace
23767 CFI_ENDPROC
23768
23769@@ -1391,13 +1636,16 @@ nmi_espfix_stack:
23770 FIXUP_ESPFIX_STACK # %eax == %esp
23771 xorl %edx,%edx # zero error code
23772 call do_nmi
23773+
23774+ pax_exit_kernel
23775+
23776 RESTORE_REGS
23777 lss 12+4(%esp), %esp # back to espfix stack
23778 CFI_ADJUST_CFA_OFFSET -24
23779 jmp irq_return
23780 #endif
23781 CFI_ENDPROC
23782-END(nmi)
23783+ENDPROC(nmi)
23784
23785 ENTRY(int3)
23786 RING0_INT_FRAME
23787@@ -1410,14 +1658,14 @@ ENTRY(int3)
23788 call do_int3
23789 jmp ret_from_exception
23790 CFI_ENDPROC
23791-END(int3)
23792+ENDPROC(int3)
23793
23794 ENTRY(general_protection)
23795 RING0_EC_FRAME
23796 pushl_cfi $do_general_protection
23797 jmp error_code
23798 CFI_ENDPROC
23799-END(general_protection)
23800+ENDPROC(general_protection)
23801
23802 #ifdef CONFIG_KVM_GUEST
23803 ENTRY(async_page_fault)
23804@@ -1426,6 +1674,6 @@ ENTRY(async_page_fault)
23805 pushl_cfi $do_async_page_fault
23806 jmp error_code
23807 CFI_ENDPROC
23808-END(async_page_fault)
23809+ENDPROC(async_page_fault)
23810 #endif
23811
23812diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23813index 2fac134..b020fca 100644
23814--- a/arch/x86/kernel/entry_64.S
23815+++ b/arch/x86/kernel/entry_64.S
23816@@ -59,6 +59,8 @@
23817 #include <asm/smap.h>
23818 #include <asm/pgtable_types.h>
23819 #include <linux/err.h>
23820+#include <asm/pgtable.h>
23821+#include <asm/alternative-asm.h>
23822
23823 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23824 #include <linux/elf-em.h>
23825@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23826 ENDPROC(native_usergs_sysret64)
23827 #endif /* CONFIG_PARAVIRT */
23828
23829+ .macro ljmpq sel, off
23830+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23831+ .byte 0x48; ljmp *1234f(%rip)
23832+ .pushsection .rodata
23833+ .align 16
23834+ 1234: .quad \off; .word \sel
23835+ .popsection
23836+#else
23837+ pushq $\sel
23838+ pushq $\off
23839+ lretq
23840+#endif
23841+ .endm
23842+
23843+ .macro pax_enter_kernel
23844+ pax_set_fptr_mask
23845+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23846+ call pax_enter_kernel
23847+#endif
23848+ .endm
23849+
23850+ .macro pax_exit_kernel
23851+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23852+ call pax_exit_kernel
23853+#endif
23854+
23855+ .endm
23856+
23857+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23858+ENTRY(pax_enter_kernel)
23859+ pushq %rdi
23860+
23861+#ifdef CONFIG_PARAVIRT
23862+ PV_SAVE_REGS(CLBR_RDI)
23863+#endif
23864+
23865+#ifdef CONFIG_PAX_KERNEXEC
23866+ GET_CR0_INTO_RDI
23867+ bts $16,%rdi
23868+ jnc 3f
23869+ mov %cs,%edi
23870+ cmp $__KERNEL_CS,%edi
23871+ jnz 2f
23872+1:
23873+#endif
23874+
23875+#ifdef CONFIG_PAX_MEMORY_UDEREF
23876+ 661: jmp 111f
23877+ .pushsection .altinstr_replacement, "a"
23878+ 662: ASM_NOP2
23879+ .popsection
23880+ .pushsection .altinstructions, "a"
23881+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23882+ .popsection
23883+ GET_CR3_INTO_RDI
23884+ cmp $0,%dil
23885+ jnz 112f
23886+ mov $__KERNEL_DS,%edi
23887+ mov %edi,%ss
23888+ jmp 111f
23889+112: cmp $1,%dil
23890+ jz 113f
23891+ ud2
23892+113: sub $4097,%rdi
23893+ bts $63,%rdi
23894+ SET_RDI_INTO_CR3
23895+ mov $__UDEREF_KERNEL_DS,%edi
23896+ mov %edi,%ss
23897+111:
23898+#endif
23899+
23900+#ifdef CONFIG_PARAVIRT
23901+ PV_RESTORE_REGS(CLBR_RDI)
23902+#endif
23903+
23904+ popq %rdi
23905+ pax_force_retaddr
23906+ retq
23907+
23908+#ifdef CONFIG_PAX_KERNEXEC
23909+2: ljmpq __KERNEL_CS,1b
23910+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23911+4: SET_RDI_INTO_CR0
23912+ jmp 1b
23913+#endif
23914+ENDPROC(pax_enter_kernel)
23915+
23916+ENTRY(pax_exit_kernel)
23917+ pushq %rdi
23918+
23919+#ifdef CONFIG_PARAVIRT
23920+ PV_SAVE_REGS(CLBR_RDI)
23921+#endif
23922+
23923+#ifdef CONFIG_PAX_KERNEXEC
23924+ mov %cs,%rdi
23925+ cmp $__KERNEXEC_KERNEL_CS,%edi
23926+ jz 2f
23927+ GET_CR0_INTO_RDI
23928+ bts $16,%rdi
23929+ jnc 4f
23930+1:
23931+#endif
23932+
23933+#ifdef CONFIG_PAX_MEMORY_UDEREF
23934+ 661: jmp 111f
23935+ .pushsection .altinstr_replacement, "a"
23936+ 662: ASM_NOP2
23937+ .popsection
23938+ .pushsection .altinstructions, "a"
23939+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23940+ .popsection
23941+ mov %ss,%edi
23942+ cmp $__UDEREF_KERNEL_DS,%edi
23943+ jnz 111f
23944+ GET_CR3_INTO_RDI
23945+ cmp $0,%dil
23946+ jz 112f
23947+ ud2
23948+112: add $4097,%rdi
23949+ bts $63,%rdi
23950+ SET_RDI_INTO_CR3
23951+ mov $__KERNEL_DS,%edi
23952+ mov %edi,%ss
23953+111:
23954+#endif
23955+
23956+#ifdef CONFIG_PARAVIRT
23957+ PV_RESTORE_REGS(CLBR_RDI);
23958+#endif
23959+
23960+ popq %rdi
23961+ pax_force_retaddr
23962+ retq
23963+
23964+#ifdef CONFIG_PAX_KERNEXEC
23965+2: GET_CR0_INTO_RDI
23966+ btr $16,%rdi
23967+ jnc 4f
23968+ ljmpq __KERNEL_CS,3f
23969+3: SET_RDI_INTO_CR0
23970+ jmp 1b
23971+4: ud2
23972+ jmp 4b
23973+#endif
23974+ENDPROC(pax_exit_kernel)
23975+#endif
23976+
23977+ .macro pax_enter_kernel_user
23978+ pax_set_fptr_mask
23979+#ifdef CONFIG_PAX_MEMORY_UDEREF
23980+ call pax_enter_kernel_user
23981+#endif
23982+ .endm
23983+
23984+ .macro pax_exit_kernel_user
23985+#ifdef CONFIG_PAX_MEMORY_UDEREF
23986+ call pax_exit_kernel_user
23987+#endif
23988+#ifdef CONFIG_PAX_RANDKSTACK
23989+ pushq %rax
23990+ pushq %r11
23991+ call pax_randomize_kstack
23992+ popq %r11
23993+ popq %rax
23994+#endif
23995+ .endm
23996+
23997+#ifdef CONFIG_PAX_MEMORY_UDEREF
23998+ENTRY(pax_enter_kernel_user)
23999+ pushq %rdi
24000+ pushq %rbx
24001+
24002+#ifdef CONFIG_PARAVIRT
24003+ PV_SAVE_REGS(CLBR_RDI)
24004+#endif
24005+
24006+ 661: jmp 111f
24007+ .pushsection .altinstr_replacement, "a"
24008+ 662: ASM_NOP2
24009+ .popsection
24010+ .pushsection .altinstructions, "a"
24011+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24012+ .popsection
24013+ GET_CR3_INTO_RDI
24014+ cmp $1,%dil
24015+ jnz 4f
24016+ sub $4097,%rdi
24017+ bts $63,%rdi
24018+ SET_RDI_INTO_CR3
24019+ jmp 3f
24020+111:
24021+
24022+ GET_CR3_INTO_RDI
24023+ mov %rdi,%rbx
24024+ add $__START_KERNEL_map,%rbx
24025+ sub phys_base(%rip),%rbx
24026+
24027+#ifdef CONFIG_PARAVIRT
24028+ cmpl $0, pv_info+PARAVIRT_enabled
24029+ jz 1f
24030+ pushq %rdi
24031+ i = 0
24032+ .rept USER_PGD_PTRS
24033+ mov i*8(%rbx),%rsi
24034+ mov $0,%sil
24035+ lea i*8(%rbx),%rdi
24036+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24037+ i = i + 1
24038+ .endr
24039+ popq %rdi
24040+ jmp 2f
24041+1:
24042+#endif
24043+
24044+ i = 0
24045+ .rept USER_PGD_PTRS
24046+ movb $0,i*8(%rbx)
24047+ i = i + 1
24048+ .endr
24049+
24050+2: SET_RDI_INTO_CR3
24051+
24052+#ifdef CONFIG_PAX_KERNEXEC
24053+ GET_CR0_INTO_RDI
24054+ bts $16,%rdi
24055+ SET_RDI_INTO_CR0
24056+#endif
24057+
24058+3:
24059+
24060+#ifdef CONFIG_PARAVIRT
24061+ PV_RESTORE_REGS(CLBR_RDI)
24062+#endif
24063+
24064+ popq %rbx
24065+ popq %rdi
24066+ pax_force_retaddr
24067+ retq
24068+4: ud2
24069+ENDPROC(pax_enter_kernel_user)
24070+
24071+ENTRY(pax_exit_kernel_user)
24072+ pushq %rdi
24073+ pushq %rbx
24074+
24075+#ifdef CONFIG_PARAVIRT
24076+ PV_SAVE_REGS(CLBR_RDI)
24077+#endif
24078+
24079+ GET_CR3_INTO_RDI
24080+ 661: jmp 1f
24081+ .pushsection .altinstr_replacement, "a"
24082+ 662: ASM_NOP2
24083+ .popsection
24084+ .pushsection .altinstructions, "a"
24085+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24086+ .popsection
24087+ cmp $0,%dil
24088+ jnz 3f
24089+ add $4097,%rdi
24090+ bts $63,%rdi
24091+ SET_RDI_INTO_CR3
24092+ jmp 2f
24093+1:
24094+
24095+ mov %rdi,%rbx
24096+
24097+#ifdef CONFIG_PAX_KERNEXEC
24098+ GET_CR0_INTO_RDI
24099+ btr $16,%rdi
24100+ jnc 3f
24101+ SET_RDI_INTO_CR0
24102+#endif
24103+
24104+ add $__START_KERNEL_map,%rbx
24105+ sub phys_base(%rip),%rbx
24106+
24107+#ifdef CONFIG_PARAVIRT
24108+ cmpl $0, pv_info+PARAVIRT_enabled
24109+ jz 1f
24110+ i = 0
24111+ .rept USER_PGD_PTRS
24112+ mov i*8(%rbx),%rsi
24113+ mov $0x67,%sil
24114+ lea i*8(%rbx),%rdi
24115+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24116+ i = i + 1
24117+ .endr
24118+ jmp 2f
24119+1:
24120+#endif
24121+
24122+ i = 0
24123+ .rept USER_PGD_PTRS
24124+ movb $0x67,i*8(%rbx)
24125+ i = i + 1
24126+ .endr
24127+2:
24128+
24129+#ifdef CONFIG_PARAVIRT
24130+ PV_RESTORE_REGS(CLBR_RDI)
24131+#endif
24132+
24133+ popq %rbx
24134+ popq %rdi
24135+ pax_force_retaddr
24136+ retq
24137+3: ud2
24138+ENDPROC(pax_exit_kernel_user)
24139+#endif
24140+
24141+ .macro pax_enter_kernel_nmi
24142+ pax_set_fptr_mask
24143+
24144+#ifdef CONFIG_PAX_KERNEXEC
24145+ GET_CR0_INTO_RDI
24146+ bts $16,%rdi
24147+ jc 110f
24148+ SET_RDI_INTO_CR0
24149+ or $2,%ebx
24150+110:
24151+#endif
24152+
24153+#ifdef CONFIG_PAX_MEMORY_UDEREF
24154+ 661: jmp 111f
24155+ .pushsection .altinstr_replacement, "a"
24156+ 662: ASM_NOP2
24157+ .popsection
24158+ .pushsection .altinstructions, "a"
24159+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24160+ .popsection
24161+ GET_CR3_INTO_RDI
24162+ cmp $0,%dil
24163+ jz 111f
24164+ sub $4097,%rdi
24165+ or $4,%ebx
24166+ bts $63,%rdi
24167+ SET_RDI_INTO_CR3
24168+ mov $__UDEREF_KERNEL_DS,%edi
24169+ mov %edi,%ss
24170+111:
24171+#endif
24172+ .endm
24173+
24174+ .macro pax_exit_kernel_nmi
24175+#ifdef CONFIG_PAX_KERNEXEC
24176+ btr $1,%ebx
24177+ jnc 110f
24178+ GET_CR0_INTO_RDI
24179+ btr $16,%rdi
24180+ SET_RDI_INTO_CR0
24181+110:
24182+#endif
24183+
24184+#ifdef CONFIG_PAX_MEMORY_UDEREF
24185+ btr $2,%ebx
24186+ jnc 111f
24187+ GET_CR3_INTO_RDI
24188+ add $4097,%rdi
24189+ bts $63,%rdi
24190+ SET_RDI_INTO_CR3
24191+ mov $__KERNEL_DS,%edi
24192+ mov %edi,%ss
24193+111:
24194+#endif
24195+ .endm
24196+
24197+ .macro pax_erase_kstack
24198+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24199+ call pax_erase_kstack
24200+#endif
24201+ .endm
24202+
24203+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24204+ENTRY(pax_erase_kstack)
24205+ pushq %rdi
24206+ pushq %rcx
24207+ pushq %rax
24208+ pushq %r11
24209+
24210+ GET_THREAD_INFO(%r11)
24211+ mov TI_lowest_stack(%r11), %rdi
24212+ mov $-0xBEEF, %rax
24213+ std
24214+
24215+1: mov %edi, %ecx
24216+ and $THREAD_SIZE_asm - 1, %ecx
24217+ shr $3, %ecx
24218+ repne scasq
24219+ jecxz 2f
24220+
24221+ cmp $2*8, %ecx
24222+ jc 2f
24223+
24224+ mov $2*8, %ecx
24225+ repe scasq
24226+ jecxz 2f
24227+ jne 1b
24228+
24229+2: cld
24230+ mov %esp, %ecx
24231+ sub %edi, %ecx
24232+
24233+ cmp $THREAD_SIZE_asm, %rcx
24234+ jb 3f
24235+ ud2
24236+3:
24237+
24238+ shr $3, %ecx
24239+ rep stosq
24240+
24241+ mov TI_task_thread_sp0(%r11), %rdi
24242+ sub $256, %rdi
24243+ mov %rdi, TI_lowest_stack(%r11)
24244+
24245+ popq %r11
24246+ popq %rax
24247+ popq %rcx
24248+ popq %rdi
24249+ pax_force_retaddr
24250+ ret
24251+ENDPROC(pax_erase_kstack)
24252+#endif
24253
24254 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
24255 #ifdef CONFIG_TRACE_IRQFLAGS
24256@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
24257 .endm
24258
24259 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
24260- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
24261+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
24262 jnc 1f
24263 TRACE_IRQS_ON_DEBUG
24264 1:
24265@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
24266 movq \tmp,R11+\offset(%rsp)
24267 .endm
24268
24269- .macro FAKE_STACK_FRAME child_rip
24270- /* push in order ss, rsp, eflags, cs, rip */
24271- xorl %eax, %eax
24272- pushq_cfi $__KERNEL_DS /* ss */
24273- /*CFI_REL_OFFSET ss,0*/
24274- pushq_cfi %rax /* rsp */
24275- CFI_REL_OFFSET rsp,0
24276- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
24277- /*CFI_REL_OFFSET rflags,0*/
24278- pushq_cfi $__KERNEL_CS /* cs */
24279- /*CFI_REL_OFFSET cs,0*/
24280- pushq_cfi \child_rip /* rip */
24281- CFI_REL_OFFSET rip,0
24282- pushq_cfi %rax /* orig rax */
24283- .endm
24284-
24285- .macro UNFAKE_STACK_FRAME
24286- addq $8*6, %rsp
24287- CFI_ADJUST_CFA_OFFSET -(6*8)
24288- .endm
24289-
24290 /*
24291 * initial frame state for interrupts (and exceptions without error code)
24292 */
24293@@ -241,25 +646,26 @@ ENDPROC(native_usergs_sysret64)
24294 /* save partial stack frame */
24295 .macro SAVE_ARGS_IRQ
24296 cld
24297- /* start from rbp in pt_regs and jump over */
24298- movq_cfi rdi, (RDI-RBP)
24299- movq_cfi rsi, (RSI-RBP)
24300- movq_cfi rdx, (RDX-RBP)
24301- movq_cfi rcx, (RCX-RBP)
24302- movq_cfi rax, (RAX-RBP)
24303- movq_cfi r8, (R8-RBP)
24304- movq_cfi r9, (R9-RBP)
24305- movq_cfi r10, (R10-RBP)
24306- movq_cfi r11, (R11-RBP)
24307+ /* start from r15 in pt_regs and jump over */
24308+ movq_cfi rdi, RDI
24309+ movq_cfi rsi, RSI
24310+ movq_cfi rdx, RDX
24311+ movq_cfi rcx, RCX
24312+ movq_cfi rax, RAX
24313+ movq_cfi r8, R8
24314+ movq_cfi r9, R9
24315+ movq_cfi r10, R10
24316+ movq_cfi r11, R11
24317+ movq_cfi r12, R12
24318
24319 /* Save rbp so that we can unwind from get_irq_regs() */
24320- movq_cfi rbp, 0
24321+ movq_cfi rbp, RBP
24322
24323 /* Save previous stack value */
24324 movq %rsp, %rsi
24325
24326- leaq -RBP(%rsp),%rdi /* arg1 for handler */
24327- testl $3, CS-RBP(%rsi)
24328+ movq %rsp,%rdi /* arg1 for handler */
24329+ testb $3, CS(%rsi)
24330 je 1f
24331 SWAPGS
24332 /*
24333@@ -279,6 +685,18 @@ ENDPROC(native_usergs_sysret64)
24334 0x06 /* DW_OP_deref */, \
24335 0x08 /* DW_OP_const1u */, SS+8-RBP, \
24336 0x22 /* DW_OP_plus */
24337+
24338+#ifdef CONFIG_PAX_MEMORY_UDEREF
24339+ testb $3, CS(%rdi)
24340+ jnz 1f
24341+ pax_enter_kernel
24342+ jmp 2f
24343+1: pax_enter_kernel_user
24344+2:
24345+#else
24346+ pax_enter_kernel
24347+#endif
24348+
24349 /* We entered an interrupt context - irqs are off: */
24350 TRACE_IRQS_OFF
24351 .endm
24352@@ -308,9 +726,52 @@ ENTRY(save_paranoid)
24353 js 1f /* negative -> in kernel */
24354 SWAPGS
24355 xorl %ebx,%ebx
24356-1: ret
24357+1:
24358+#ifdef CONFIG_PAX_MEMORY_UDEREF
24359+ testb $3, CS+8(%rsp)
24360+ jnz 1f
24361+ pax_enter_kernel
24362+ jmp 2f
24363+1: pax_enter_kernel_user
24364+2:
24365+#else
24366+ pax_enter_kernel
24367+#endif
24368+ pax_force_retaddr
24369+ ret
24370 CFI_ENDPROC
24371-END(save_paranoid)
24372+ENDPROC(save_paranoid)
24373+
24374+ENTRY(save_paranoid_nmi)
24375+ XCPT_FRAME 1 RDI+8
24376+ cld
24377+ movq_cfi rdi, RDI+8
24378+ movq_cfi rsi, RSI+8
24379+ movq_cfi rdx, RDX+8
24380+ movq_cfi rcx, RCX+8
24381+ movq_cfi rax, RAX+8
24382+ movq_cfi r8, R8+8
24383+ movq_cfi r9, R9+8
24384+ movq_cfi r10, R10+8
24385+ movq_cfi r11, R11+8
24386+ movq_cfi rbx, RBX+8
24387+ movq_cfi rbp, RBP+8
24388+ movq_cfi r12, R12+8
24389+ movq_cfi r13, R13+8
24390+ movq_cfi r14, R14+8
24391+ movq_cfi r15, R15+8
24392+ movl $1,%ebx
24393+ movl $MSR_GS_BASE,%ecx
24394+ rdmsr
24395+ testl %edx,%edx
24396+ js 1f /* negative -> in kernel */
24397+ SWAPGS
24398+ xorl %ebx,%ebx
24399+1: pax_enter_kernel_nmi
24400+ pax_force_retaddr
24401+ ret
24402+ CFI_ENDPROC
24403+ENDPROC(save_paranoid_nmi)
24404
24405 /*
24406 * A newly forked process directly context switches into this address.
24407@@ -331,7 +792,7 @@ ENTRY(ret_from_fork)
24408
24409 RESTORE_REST
24410
24411- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24412+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24413 jz 1f
24414
24415 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
24416@@ -341,15 +802,13 @@ ENTRY(ret_from_fork)
24417 jmp ret_from_sys_call # go to the SYSRET fastpath
24418
24419 1:
24420- subq $REST_SKIP, %rsp # leave space for volatiles
24421- CFI_ADJUST_CFA_OFFSET REST_SKIP
24422 movq %rbp, %rdi
24423 call *%rbx
24424 movl $0, RAX(%rsp)
24425 RESTORE_REST
24426 jmp int_ret_from_sys_call
24427 CFI_ENDPROC
24428-END(ret_from_fork)
24429+ENDPROC(ret_from_fork)
24430
24431 /*
24432 * System call entry. Up to 6 arguments in registers are supported.
24433@@ -386,7 +845,7 @@ END(ret_from_fork)
24434 ENTRY(system_call)
24435 CFI_STARTPROC simple
24436 CFI_SIGNAL_FRAME
24437- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
24438+ CFI_DEF_CFA rsp,0
24439 CFI_REGISTER rip,rcx
24440 /*CFI_REGISTER rflags,r11*/
24441 SWAPGS_UNSAFE_STACK
24442@@ -399,16 +858,23 @@ GLOBAL(system_call_after_swapgs)
24443
24444 movq %rsp,PER_CPU_VAR(old_rsp)
24445 movq PER_CPU_VAR(kernel_stack),%rsp
24446+ SAVE_ARGS 8*6,0
24447+ pax_enter_kernel_user
24448+
24449+#ifdef CONFIG_PAX_RANDKSTACK
24450+ pax_erase_kstack
24451+#endif
24452+
24453 /*
24454 * No need to follow this irqs off/on section - it's straight
24455 * and short:
24456 */
24457 ENABLE_INTERRUPTS(CLBR_NONE)
24458- SAVE_ARGS 8,0
24459 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
24460 movq %rcx,RIP-ARGOFFSET(%rsp)
24461 CFI_REL_OFFSET rip,RIP-ARGOFFSET
24462- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24463+ GET_THREAD_INFO(%rcx)
24464+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
24465 jnz tracesys
24466 system_call_fastpath:
24467 #if __SYSCALL_MASK == ~0
24468@@ -432,10 +898,13 @@ sysret_check:
24469 LOCKDEP_SYS_EXIT
24470 DISABLE_INTERRUPTS(CLBR_NONE)
24471 TRACE_IRQS_OFF
24472- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
24473+ GET_THREAD_INFO(%rcx)
24474+ movl TI_flags(%rcx),%edx
24475 andl %edi,%edx
24476 jnz sysret_careful
24477 CFI_REMEMBER_STATE
24478+ pax_exit_kernel_user
24479+ pax_erase_kstack
24480 /*
24481 * sysretq will re-enable interrupts:
24482 */
24483@@ -494,6 +963,9 @@ auditsys:
24484 movq %rax,%rsi /* 2nd arg: syscall number */
24485 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
24486 call __audit_syscall_entry
24487+
24488+ pax_erase_kstack
24489+
24490 LOAD_ARGS 0 /* reload call-clobbered registers */
24491 jmp system_call_fastpath
24492
24493@@ -515,7 +987,7 @@ sysret_audit:
24494 /* Do syscall tracing */
24495 tracesys:
24496 #ifdef CONFIG_AUDITSYSCALL
24497- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24498+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
24499 jz auditsys
24500 #endif
24501 SAVE_REST
24502@@ -523,12 +995,15 @@ tracesys:
24503 FIXUP_TOP_OF_STACK %rdi
24504 movq %rsp,%rdi
24505 call syscall_trace_enter
24506+
24507+ pax_erase_kstack
24508+
24509 /*
24510 * Reload arg registers from stack in case ptrace changed them.
24511 * We don't reload %rax because syscall_trace_enter() returned
24512 * the value it wants us to use in the table lookup.
24513 */
24514- LOAD_ARGS ARGOFFSET, 1
24515+ LOAD_ARGS 1
24516 RESTORE_REST
24517 #if __SYSCALL_MASK == ~0
24518 cmpq $__NR_syscall_max,%rax
24519@@ -558,7 +1033,9 @@ GLOBAL(int_with_check)
24520 andl %edi,%edx
24521 jnz int_careful
24522 andl $~TS_COMPAT,TI_status(%rcx)
24523- jmp retint_swapgs
24524+ pax_exit_kernel_user
24525+ pax_erase_kstack
24526+ jmp retint_swapgs_pax
24527
24528 /* Either reschedule or signal or syscall exit tracking needed. */
24529 /* First do a reschedule test. */
24530@@ -604,7 +1081,7 @@ int_restore_rest:
24531 TRACE_IRQS_OFF
24532 jmp int_with_check
24533 CFI_ENDPROC
24534-END(system_call)
24535+ENDPROC(system_call)
24536
24537 .macro FORK_LIKE func
24538 ENTRY(stub_\func)
24539@@ -617,9 +1094,10 @@ ENTRY(stub_\func)
24540 DEFAULT_FRAME 0 8 /* offset 8: return address */
24541 call sys_\func
24542 RESTORE_TOP_OF_STACK %r11, 8
24543- ret $REST_SKIP /* pop extended registers */
24544+ pax_force_retaddr
24545+ ret
24546 CFI_ENDPROC
24547-END(stub_\func)
24548+ENDPROC(stub_\func)
24549 .endm
24550
24551 .macro FIXED_FRAME label,func
24552@@ -629,9 +1107,10 @@ ENTRY(\label)
24553 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24554 call \func
24555 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24556+ pax_force_retaddr
24557 ret
24558 CFI_ENDPROC
24559-END(\label)
24560+ENDPROC(\label)
24561 .endm
24562
24563 FORK_LIKE clone
24564@@ -639,19 +1118,6 @@ END(\label)
24565 FORK_LIKE vfork
24566 FIXED_FRAME stub_iopl, sys_iopl
24567
24568-ENTRY(ptregscall_common)
24569- DEFAULT_FRAME 1 8 /* offset 8: return address */
24570- RESTORE_TOP_OF_STACK %r11, 8
24571- movq_cfi_restore R15+8, r15
24572- movq_cfi_restore R14+8, r14
24573- movq_cfi_restore R13+8, r13
24574- movq_cfi_restore R12+8, r12
24575- movq_cfi_restore RBP+8, rbp
24576- movq_cfi_restore RBX+8, rbx
24577- ret $REST_SKIP /* pop extended registers */
24578- CFI_ENDPROC
24579-END(ptregscall_common)
24580-
24581 ENTRY(stub_execve)
24582 CFI_STARTPROC
24583 addq $8, %rsp
24584@@ -663,7 +1129,7 @@ ENTRY(stub_execve)
24585 RESTORE_REST
24586 jmp int_ret_from_sys_call
24587 CFI_ENDPROC
24588-END(stub_execve)
24589+ENDPROC(stub_execve)
24590
24591 /*
24592 * sigreturn is special because it needs to restore all registers on return.
24593@@ -680,7 +1146,7 @@ ENTRY(stub_rt_sigreturn)
24594 RESTORE_REST
24595 jmp int_ret_from_sys_call
24596 CFI_ENDPROC
24597-END(stub_rt_sigreturn)
24598+ENDPROC(stub_rt_sigreturn)
24599
24600 #ifdef CONFIG_X86_X32_ABI
24601 ENTRY(stub_x32_rt_sigreturn)
24602@@ -694,7 +1160,7 @@ ENTRY(stub_x32_rt_sigreturn)
24603 RESTORE_REST
24604 jmp int_ret_from_sys_call
24605 CFI_ENDPROC
24606-END(stub_x32_rt_sigreturn)
24607+ENDPROC(stub_x32_rt_sigreturn)
24608
24609 ENTRY(stub_x32_execve)
24610 CFI_STARTPROC
24611@@ -708,7 +1174,7 @@ ENTRY(stub_x32_execve)
24612 RESTORE_REST
24613 jmp int_ret_from_sys_call
24614 CFI_ENDPROC
24615-END(stub_x32_execve)
24616+ENDPROC(stub_x32_execve)
24617
24618 #endif
24619
24620@@ -745,7 +1211,7 @@ vector=vector+1
24621 2: jmp common_interrupt
24622 .endr
24623 CFI_ENDPROC
24624-END(irq_entries_start)
24625+ENDPROC(irq_entries_start)
24626
24627 .previous
24628 END(interrupt)
24629@@ -762,8 +1228,8 @@ END(interrupt)
24630 /* 0(%rsp): ~(interrupt number) */
24631 .macro interrupt func
24632 /* reserve pt_regs for scratch regs and rbp */
24633- subq $ORIG_RAX-RBP, %rsp
24634- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24635+ subq $ORIG_RAX, %rsp
24636+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24637 SAVE_ARGS_IRQ
24638 call \func
24639 .endm
24640@@ -786,14 +1252,14 @@ ret_from_intr:
24641
24642 /* Restore saved previous stack */
24643 popq %rsi
24644- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24645- leaq ARGOFFSET-RBP(%rsi), %rsp
24646+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24647+ movq %rsi, %rsp
24648 CFI_DEF_CFA_REGISTER rsp
24649- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24650+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24651
24652 exit_intr:
24653 GET_THREAD_INFO(%rcx)
24654- testl $3,CS-ARGOFFSET(%rsp)
24655+ testb $3,CS-ARGOFFSET(%rsp)
24656 je retint_kernel
24657
24658 /* Interrupt came from user space */
24659@@ -815,12 +1281,35 @@ retint_swapgs: /* return to user-space */
24660 * The iretq could re-enable interrupts:
24661 */
24662 DISABLE_INTERRUPTS(CLBR_ANY)
24663+ pax_exit_kernel_user
24664+retint_swapgs_pax:
24665 TRACE_IRQS_IRETQ
24666 SWAPGS
24667 jmp restore_args
24668
24669 retint_restore_args: /* return to kernel space */
24670 DISABLE_INTERRUPTS(CLBR_ANY)
24671+ pax_exit_kernel
24672+
24673+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24674+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24675+ * namely calling EFI runtime services with a phys mapping. We're
24676+ * starting off with NOPs and patch in the real instrumentation
24677+ * (BTS/OR) before starting any userland process; even before starting
24678+ * up the APs.
24679+ */
24680+ .pushsection .altinstr_replacement, "a"
24681+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24682+ 602:
24683+ .popsection
24684+ 603: .fill 602b-601b, 1, 0x90
24685+ .pushsection .altinstructions, "a"
24686+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24687+ .popsection
24688+#else
24689+ pax_force_retaddr (RIP-ARGOFFSET)
24690+#endif
24691+
24692 /*
24693 * The iretq could re-enable interrupts:
24694 */
24695@@ -933,7 +1422,7 @@ ENTRY(retint_kernel)
24696 jmp exit_intr
24697 #endif
24698 CFI_ENDPROC
24699-END(common_interrupt)
24700+ENDPROC(common_interrupt)
24701
24702 /*
24703 * If IRET takes a fault on the espfix stack, then we
24704@@ -955,13 +1444,13 @@ __do_double_fault:
24705 cmpq $native_irq_return_iret,%rax
24706 jne do_double_fault /* This shouldn't happen... */
24707 movq PER_CPU_VAR(kernel_stack),%rax
24708- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24709+ subq $(6*8),%rax /* Reset to original stack */
24710 movq %rax,RSP(%rdi)
24711 movq $0,(%rax) /* Missing (lost) #GP error code */
24712 movq $general_protection,RIP(%rdi)
24713 retq
24714 CFI_ENDPROC
24715-END(__do_double_fault)
24716+ENDPROC(__do_double_fault)
24717 #else
24718 # define __do_double_fault do_double_fault
24719 #endif
24720@@ -978,7 +1467,7 @@ ENTRY(\sym)
24721 interrupt \do_sym
24722 jmp ret_from_intr
24723 CFI_ENDPROC
24724-END(\sym)
24725+ENDPROC(\sym)
24726 .endm
24727
24728 #ifdef CONFIG_TRACING
24729@@ -1051,7 +1540,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24730 /*
24731 * Exception entry points.
24732 */
24733-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24734+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24735
24736 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24737 ENTRY(\sym)
24738@@ -1102,6 +1591,12 @@ ENTRY(\sym)
24739 .endif
24740
24741 .if \shift_ist != -1
24742+#ifdef CONFIG_SMP
24743+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24744+ lea init_tss(%r13), %r13
24745+#else
24746+ lea init_tss(%rip), %r13
24747+#endif
24748 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24749 .endif
24750
24751@@ -1118,7 +1613,7 @@ ENTRY(\sym)
24752 .endif
24753
24754 CFI_ENDPROC
24755-END(\sym)
24756+ENDPROC(\sym)
24757 .endm
24758
24759 #ifdef CONFIG_TRACING
24760@@ -1159,9 +1654,10 @@ gs_change:
24761 2: mfence /* workaround */
24762 SWAPGS
24763 popfq_cfi
24764+ pax_force_retaddr
24765 ret
24766 CFI_ENDPROC
24767-END(native_load_gs_index)
24768+ENDPROC(native_load_gs_index)
24769
24770 _ASM_EXTABLE(gs_change,bad_gs)
24771 .section .fixup,"ax"
24772@@ -1189,9 +1685,10 @@ ENTRY(do_softirq_own_stack)
24773 CFI_DEF_CFA_REGISTER rsp
24774 CFI_ADJUST_CFA_OFFSET -8
24775 decl PER_CPU_VAR(irq_count)
24776+ pax_force_retaddr
24777 ret
24778 CFI_ENDPROC
24779-END(do_softirq_own_stack)
24780+ENDPROC(do_softirq_own_stack)
24781
24782 #ifdef CONFIG_XEN
24783 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24784@@ -1229,7 +1726,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24785 decl PER_CPU_VAR(irq_count)
24786 jmp error_exit
24787 CFI_ENDPROC
24788-END(xen_do_hypervisor_callback)
24789+ENDPROC(xen_do_hypervisor_callback)
24790
24791 /*
24792 * Hypervisor uses this for application faults while it executes.
24793@@ -1288,7 +1785,7 @@ ENTRY(xen_failsafe_callback)
24794 SAVE_ALL
24795 jmp error_exit
24796 CFI_ENDPROC
24797-END(xen_failsafe_callback)
24798+ENDPROC(xen_failsafe_callback)
24799
24800 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24801 xen_hvm_callback_vector xen_evtchn_do_upcall
24802@@ -1335,18 +1832,33 @@ ENTRY(paranoid_exit)
24803 DEFAULT_FRAME
24804 DISABLE_INTERRUPTS(CLBR_NONE)
24805 TRACE_IRQS_OFF_DEBUG
24806- testl %ebx,%ebx /* swapgs needed? */
24807+ testl $1,%ebx /* swapgs needed? */
24808 jnz paranoid_restore
24809- testl $3,CS(%rsp)
24810+ testb $3,CS(%rsp)
24811 jnz paranoid_userspace
24812+#ifdef CONFIG_PAX_MEMORY_UDEREF
24813+ pax_exit_kernel
24814+ TRACE_IRQS_IRETQ 0
24815+ SWAPGS_UNSAFE_STACK
24816+ RESTORE_ALL 8
24817+ pax_force_retaddr_bts
24818+ jmp irq_return
24819+#endif
24820 paranoid_swapgs:
24821+#ifdef CONFIG_PAX_MEMORY_UDEREF
24822+ pax_exit_kernel_user
24823+#else
24824+ pax_exit_kernel
24825+#endif
24826 TRACE_IRQS_IRETQ 0
24827 SWAPGS_UNSAFE_STACK
24828 RESTORE_ALL 8
24829 jmp irq_return
24830 paranoid_restore:
24831+ pax_exit_kernel
24832 TRACE_IRQS_IRETQ_DEBUG 0
24833 RESTORE_ALL 8
24834+ pax_force_retaddr_bts
24835 jmp irq_return
24836 paranoid_userspace:
24837 GET_THREAD_INFO(%rcx)
24838@@ -1375,7 +1887,7 @@ paranoid_schedule:
24839 TRACE_IRQS_OFF
24840 jmp paranoid_userspace
24841 CFI_ENDPROC
24842-END(paranoid_exit)
24843+ENDPROC(paranoid_exit)
24844
24845 /*
24846 * Exception entry point. This expects an error code/orig_rax on the stack.
24847@@ -1402,12 +1914,23 @@ ENTRY(error_entry)
24848 movq %r14, R14+8(%rsp)
24849 movq %r15, R15+8(%rsp)
24850 xorl %ebx,%ebx
24851- testl $3,CS+8(%rsp)
24852+ testb $3,CS+8(%rsp)
24853 je error_kernelspace
24854 error_swapgs:
24855 SWAPGS
24856 error_sti:
24857+#ifdef CONFIG_PAX_MEMORY_UDEREF
24858+ testb $3, CS+8(%rsp)
24859+ jnz 1f
24860+ pax_enter_kernel
24861+ jmp 2f
24862+1: pax_enter_kernel_user
24863+2:
24864+#else
24865+ pax_enter_kernel
24866+#endif
24867 TRACE_IRQS_OFF
24868+ pax_force_retaddr
24869 ret
24870
24871 /*
24872@@ -1435,7 +1958,7 @@ bstep_iret:
24873 movq %rcx,RIP+8(%rsp)
24874 jmp error_swapgs
24875 CFI_ENDPROC
24876-END(error_entry)
24877+ENDPROC(error_entry)
24878
24879
24880 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24881@@ -1446,7 +1969,7 @@ ENTRY(error_exit)
24882 DISABLE_INTERRUPTS(CLBR_NONE)
24883 TRACE_IRQS_OFF
24884 GET_THREAD_INFO(%rcx)
24885- testl %eax,%eax
24886+ testl $1,%eax
24887 jne retint_kernel
24888 LOCKDEP_SYS_EXIT_IRQ
24889 movl TI_flags(%rcx),%edx
24890@@ -1455,7 +1978,7 @@ ENTRY(error_exit)
24891 jnz retint_careful
24892 jmp retint_swapgs
24893 CFI_ENDPROC
24894-END(error_exit)
24895+ENDPROC(error_exit)
24896
24897 /*
24898 * Test if a given stack is an NMI stack or not.
24899@@ -1513,9 +2036,11 @@ ENTRY(nmi)
24900 * If %cs was not the kernel segment, then the NMI triggered in user
24901 * space, which means it is definitely not nested.
24902 */
24903+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24904+ je 1f
24905 cmpl $__KERNEL_CS, 16(%rsp)
24906 jne first_nmi
24907-
24908+1:
24909 /*
24910 * Check the special variable on the stack to see if NMIs are
24911 * executing.
24912@@ -1549,8 +2074,7 @@ nested_nmi:
24913
24914 1:
24915 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24916- leaq -1*8(%rsp), %rdx
24917- movq %rdx, %rsp
24918+ subq $8, %rsp
24919 CFI_ADJUST_CFA_OFFSET 1*8
24920 leaq -10*8(%rsp), %rdx
24921 pushq_cfi $__KERNEL_DS
24922@@ -1568,6 +2092,7 @@ nested_nmi_out:
24923 CFI_RESTORE rdx
24924
24925 /* No need to check faults here */
24926+# pax_force_retaddr_bts
24927 INTERRUPT_RETURN
24928
24929 CFI_RESTORE_STATE
24930@@ -1664,13 +2189,13 @@ end_repeat_nmi:
24931 subq $ORIG_RAX-R15, %rsp
24932 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24933 /*
24934- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24935+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24936 * as we should not be calling schedule in NMI context.
24937 * Even with normal interrupts enabled. An NMI should not be
24938 * setting NEED_RESCHED or anything that normal interrupts and
24939 * exceptions might do.
24940 */
24941- call save_paranoid
24942+ call save_paranoid_nmi
24943 DEFAULT_FRAME 0
24944
24945 /*
24946@@ -1680,9 +2205,9 @@ end_repeat_nmi:
24947 * NMI itself takes a page fault, the page fault that was preempted
24948 * will read the information from the NMI page fault and not the
24949 * origin fault. Save it off and restore it if it changes.
24950- * Use the r12 callee-saved register.
24951+ * Use the r13 callee-saved register.
24952 */
24953- movq %cr2, %r12
24954+ movq %cr2, %r13
24955
24956 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24957 movq %rsp,%rdi
24958@@ -1691,29 +2216,34 @@ end_repeat_nmi:
24959
24960 /* Did the NMI take a page fault? Restore cr2 if it did */
24961 movq %cr2, %rcx
24962- cmpq %rcx, %r12
24963+ cmpq %rcx, %r13
24964 je 1f
24965- movq %r12, %cr2
24966+ movq %r13, %cr2
24967 1:
24968
24969- testl %ebx,%ebx /* swapgs needed? */
24970+ testl $1,%ebx /* swapgs needed? */
24971 jnz nmi_restore
24972 nmi_swapgs:
24973 SWAPGS_UNSAFE_STACK
24974 nmi_restore:
24975+ pax_exit_kernel_nmi
24976 /* Pop the extra iret frame at once */
24977 RESTORE_ALL 6*8
24978+ testb $3, 8(%rsp)
24979+ jnz 1f
24980+ pax_force_retaddr_bts
24981+1:
24982
24983 /* Clear the NMI executing stack variable */
24984 movq $0, 5*8(%rsp)
24985 jmp irq_return
24986 CFI_ENDPROC
24987-END(nmi)
24988+ENDPROC(nmi)
24989
24990 ENTRY(ignore_sysret)
24991 CFI_STARTPROC
24992 mov $-ENOSYS,%eax
24993 sysret
24994 CFI_ENDPROC
24995-END(ignore_sysret)
24996+ENDPROC(ignore_sysret)
24997
24998diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24999index 94d857f..bf1f0bf 100644
25000--- a/arch/x86/kernel/espfix_64.c
25001+++ b/arch/x86/kernel/espfix_64.c
25002@@ -197,7 +197,7 @@ void init_espfix_ap(void)
25003 set_pte(&pte_p[n*PTE_STRIDE], pte);
25004
25005 /* Job is done for this CPU and any CPU which shares this page */
25006- ACCESS_ONCE(espfix_pages[page]) = stack_page;
25007+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
25008
25009 unlock_done:
25010 mutex_unlock(&espfix_init_mutex);
25011diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
25012index 3386dc9..28bdf81 100644
25013--- a/arch/x86/kernel/ftrace.c
25014+++ b/arch/x86/kernel/ftrace.c
25015@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
25016 * kernel identity mapping to modify code.
25017 */
25018 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
25019- ip = (unsigned long)__va(__pa_symbol(ip));
25020+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
25021
25022 return ip;
25023 }
25024@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
25025 {
25026 unsigned char replaced[MCOUNT_INSN_SIZE];
25027
25028+ ip = ktla_ktva(ip);
25029+
25030 /*
25031 * Note: Due to modules and __init, code can
25032 * disappear and change, we need to protect against faulting
25033@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
25034 unsigned char old[MCOUNT_INSN_SIZE];
25035 int ret;
25036
25037- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
25038+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
25039
25040 ftrace_update_func = ip;
25041 /* Make sure the breakpoints see the ftrace_update_func update */
25042@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
25043 unsigned char replaced[MCOUNT_INSN_SIZE];
25044 unsigned char brk = BREAKPOINT_INSTRUCTION;
25045
25046- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
25047+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
25048 return -EFAULT;
25049
25050 /* Make sure it is what we expect it to be */
25051diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
25052index eda1a86..8f6df48 100644
25053--- a/arch/x86/kernel/head64.c
25054+++ b/arch/x86/kernel/head64.c
25055@@ -67,12 +67,12 @@ again:
25056 pgd = *pgd_p;
25057
25058 /*
25059- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
25060- * critical -- __PAGE_OFFSET would point us back into the dynamic
25061+ * The use of __early_va rather than __va here is critical:
25062+ * __va would point us back into the dynamic
25063 * range and we might end up looping forever...
25064 */
25065 if (pgd)
25066- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25067+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
25068 else {
25069 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25070 reset_early_page_tables();
25071@@ -82,13 +82,13 @@ again:
25072 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
25073 for (i = 0; i < PTRS_PER_PUD; i++)
25074 pud_p[i] = 0;
25075- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25076+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
25077 }
25078 pud_p += pud_index(address);
25079 pud = *pud_p;
25080
25081 if (pud)
25082- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25083+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
25084 else {
25085 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25086 reset_early_page_tables();
25087@@ -98,7 +98,7 @@ again:
25088 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
25089 for (i = 0; i < PTRS_PER_PMD; i++)
25090 pmd_p[i] = 0;
25091- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25092+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
25093 }
25094 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
25095 pmd_p[pmd_index(address)] = pmd;
25096@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
25097 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
25098 early_printk("Kernel alive\n");
25099
25100- clear_page(init_level4_pgt);
25101 /* set init_level4_pgt kernel high mapping*/
25102 init_level4_pgt[511] = early_level4_pgt[511];
25103
25104diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
25105index f36bd42..0ab4474 100644
25106--- a/arch/x86/kernel/head_32.S
25107+++ b/arch/x86/kernel/head_32.S
25108@@ -26,6 +26,12 @@
25109 /* Physical address */
25110 #define pa(X) ((X) - __PAGE_OFFSET)
25111
25112+#ifdef CONFIG_PAX_KERNEXEC
25113+#define ta(X) (X)
25114+#else
25115+#define ta(X) ((X) - __PAGE_OFFSET)
25116+#endif
25117+
25118 /*
25119 * References to members of the new_cpu_data structure.
25120 */
25121@@ -55,11 +61,7 @@
25122 * and small than max_low_pfn, otherwise will waste some page table entries
25123 */
25124
25125-#if PTRS_PER_PMD > 1
25126-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
25127-#else
25128-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
25129-#endif
25130+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
25131
25132 /* Number of possible pages in the lowmem region */
25133 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
25134@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
25135 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25136
25137 /*
25138+ * Real beginning of normal "text" segment
25139+ */
25140+ENTRY(stext)
25141+ENTRY(_stext)
25142+
25143+/*
25144 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
25145 * %esi points to the real-mode code as a 32-bit pointer.
25146 * CS and DS must be 4 GB flat segments, but we don't depend on
25147@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25148 * can.
25149 */
25150 __HEAD
25151+
25152+#ifdef CONFIG_PAX_KERNEXEC
25153+ jmp startup_32
25154+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
25155+.fill PAGE_SIZE-5,1,0xcc
25156+#endif
25157+
25158 ENTRY(startup_32)
25159 movl pa(stack_start),%ecx
25160
25161@@ -106,6 +121,59 @@ ENTRY(startup_32)
25162 2:
25163 leal -__PAGE_OFFSET(%ecx),%esp
25164
25165+#ifdef CONFIG_SMP
25166+ movl $pa(cpu_gdt_table),%edi
25167+ movl $__per_cpu_load,%eax
25168+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
25169+ rorl $16,%eax
25170+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
25171+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
25172+ movl $__per_cpu_end - 1,%eax
25173+ subl $__per_cpu_start,%eax
25174+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
25175+#endif
25176+
25177+#ifdef CONFIG_PAX_MEMORY_UDEREF
25178+ movl $NR_CPUS,%ecx
25179+ movl $pa(cpu_gdt_table),%edi
25180+1:
25181+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
25182+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
25183+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
25184+ addl $PAGE_SIZE_asm,%edi
25185+ loop 1b
25186+#endif
25187+
25188+#ifdef CONFIG_PAX_KERNEXEC
25189+ movl $pa(boot_gdt),%edi
25190+ movl $__LOAD_PHYSICAL_ADDR,%eax
25191+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
25192+ rorl $16,%eax
25193+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
25194+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
25195+ rorl $16,%eax
25196+
25197+ ljmp $(__BOOT_CS),$1f
25198+1:
25199+
25200+ movl $NR_CPUS,%ecx
25201+ movl $pa(cpu_gdt_table),%edi
25202+ addl $__PAGE_OFFSET,%eax
25203+1:
25204+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
25205+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
25206+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
25207+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
25208+ rorl $16,%eax
25209+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
25210+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
25211+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
25212+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
25213+ rorl $16,%eax
25214+ addl $PAGE_SIZE_asm,%edi
25215+ loop 1b
25216+#endif
25217+
25218 /*
25219 * Clear BSS first so that there are no surprises...
25220 */
25221@@ -201,8 +269,11 @@ ENTRY(startup_32)
25222 movl %eax, pa(max_pfn_mapped)
25223
25224 /* Do early initialization of the fixmap area */
25225- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25226- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
25227+#ifdef CONFIG_COMPAT_VDSO
25228+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
25229+#else
25230+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
25231+#endif
25232 #else /* Not PAE */
25233
25234 page_pde_offset = (__PAGE_OFFSET >> 20);
25235@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25236 movl %eax, pa(max_pfn_mapped)
25237
25238 /* Do early initialization of the fixmap area */
25239- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25240- movl %eax,pa(initial_page_table+0xffc)
25241+#ifdef CONFIG_COMPAT_VDSO
25242+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
25243+#else
25244+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
25245+#endif
25246 #endif
25247
25248 #ifdef CONFIG_PARAVIRT
25249@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25250 cmpl $num_subarch_entries, %eax
25251 jae bad_subarch
25252
25253- movl pa(subarch_entries)(,%eax,4), %eax
25254- subl $__PAGE_OFFSET, %eax
25255- jmp *%eax
25256+ jmp *pa(subarch_entries)(,%eax,4)
25257
25258 bad_subarch:
25259 WEAK(lguest_entry)
25260@@ -261,10 +333,10 @@ WEAK(xen_entry)
25261 __INITDATA
25262
25263 subarch_entries:
25264- .long default_entry /* normal x86/PC */
25265- .long lguest_entry /* lguest hypervisor */
25266- .long xen_entry /* Xen hypervisor */
25267- .long default_entry /* Moorestown MID */
25268+ .long ta(default_entry) /* normal x86/PC */
25269+ .long ta(lguest_entry) /* lguest hypervisor */
25270+ .long ta(xen_entry) /* Xen hypervisor */
25271+ .long ta(default_entry) /* Moorestown MID */
25272 num_subarch_entries = (. - subarch_entries) / 4
25273 .previous
25274 #else
25275@@ -354,6 +426,7 @@ default_entry:
25276 movl pa(mmu_cr4_features),%eax
25277 movl %eax,%cr4
25278
25279+#ifdef CONFIG_X86_PAE
25280 testb $X86_CR4_PAE, %al # check if PAE is enabled
25281 jz enable_paging
25282
25283@@ -382,6 +455,9 @@ default_entry:
25284 /* Make changes effective */
25285 wrmsr
25286
25287+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
25288+#endif
25289+
25290 enable_paging:
25291
25292 /*
25293@@ -449,14 +525,20 @@ is486:
25294 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
25295 movl %eax,%ss # after changing gdt.
25296
25297- movl $(__USER_DS),%eax # DS/ES contains default USER segment
25298+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
25299 movl %eax,%ds
25300 movl %eax,%es
25301
25302 movl $(__KERNEL_PERCPU), %eax
25303 movl %eax,%fs # set this cpu's percpu
25304
25305+#ifdef CONFIG_CC_STACKPROTECTOR
25306 movl $(__KERNEL_STACK_CANARY),%eax
25307+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
25308+ movl $(__USER_DS),%eax
25309+#else
25310+ xorl %eax,%eax
25311+#endif
25312 movl %eax,%gs
25313
25314 xorl %eax,%eax # Clear LDT
25315@@ -512,8 +594,11 @@ setup_once:
25316 * relocation. Manually set base address in stack canary
25317 * segment descriptor.
25318 */
25319- movl $gdt_page,%eax
25320+ movl $cpu_gdt_table,%eax
25321 movl $stack_canary,%ecx
25322+#ifdef CONFIG_SMP
25323+ addl $__per_cpu_load,%ecx
25324+#endif
25325 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
25326 shrl $16, %ecx
25327 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
25328@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
25329 cmpl $2,(%esp) # X86_TRAP_NMI
25330 je is_nmi # Ignore NMI
25331
25332- cmpl $2,%ss:early_recursion_flag
25333+ cmpl $1,%ss:early_recursion_flag
25334 je hlt_loop
25335 incl %ss:early_recursion_flag
25336
25337@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
25338 pushl (20+6*4)(%esp) /* trapno */
25339 pushl $fault_msg
25340 call printk
25341-#endif
25342 call dump_stack
25343+#endif
25344 hlt_loop:
25345 hlt
25346 jmp hlt_loop
25347@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
25348 /* This is the default interrupt "handler" :-) */
25349 ALIGN
25350 ignore_int:
25351- cld
25352 #ifdef CONFIG_PRINTK
25353+ cmpl $2,%ss:early_recursion_flag
25354+ je hlt_loop
25355+ incl %ss:early_recursion_flag
25356+ cld
25357 pushl %eax
25358 pushl %ecx
25359 pushl %edx
25360@@ -617,9 +705,6 @@ ignore_int:
25361 movl $(__KERNEL_DS),%eax
25362 movl %eax,%ds
25363 movl %eax,%es
25364- cmpl $2,early_recursion_flag
25365- je hlt_loop
25366- incl early_recursion_flag
25367 pushl 16(%esp)
25368 pushl 24(%esp)
25369 pushl 32(%esp)
25370@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
25371 /*
25372 * BSS section
25373 */
25374-__PAGE_ALIGNED_BSS
25375- .align PAGE_SIZE
25376 #ifdef CONFIG_X86_PAE
25377+.section .initial_pg_pmd,"a",@progbits
25378 initial_pg_pmd:
25379 .fill 1024*KPMDS,4,0
25380 #else
25381+.section .initial_page_table,"a",@progbits
25382 ENTRY(initial_page_table)
25383 .fill 1024,4,0
25384 #endif
25385+.section .initial_pg_fixmap,"a",@progbits
25386 initial_pg_fixmap:
25387 .fill 1024,4,0
25388+.section .empty_zero_page,"a",@progbits
25389 ENTRY(empty_zero_page)
25390 .fill 4096,1,0
25391+.section .swapper_pg_dir,"a",@progbits
25392 ENTRY(swapper_pg_dir)
25393+#ifdef CONFIG_X86_PAE
25394+ .fill 4,8,0
25395+#else
25396 .fill 1024,4,0
25397+#endif
25398
25399 /*
25400 * This starts the data section.
25401 */
25402 #ifdef CONFIG_X86_PAE
25403-__PAGE_ALIGNED_DATA
25404- /* Page-aligned for the benefit of paravirt? */
25405- .align PAGE_SIZE
25406+.section .initial_page_table,"a",@progbits
25407 ENTRY(initial_page_table)
25408 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
25409 # if KPMDS == 3
25410@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
25411 # error "Kernel PMDs should be 1, 2 or 3"
25412 # endif
25413 .align PAGE_SIZE /* needs to be page-sized too */
25414+
25415+#ifdef CONFIG_PAX_PER_CPU_PGD
25416+ENTRY(cpu_pgd)
25417+ .rept 2*NR_CPUS
25418+ .fill 4,8,0
25419+ .endr
25420+#endif
25421+
25422 #endif
25423
25424 .data
25425 .balign 4
25426 ENTRY(stack_start)
25427- .long init_thread_union+THREAD_SIZE
25428+ .long init_thread_union+THREAD_SIZE-8
25429
25430 __INITRODATA
25431 int_msg:
25432@@ -727,7 +825,7 @@ fault_msg:
25433 * segment size, and 32-bit linear address value:
25434 */
25435
25436- .data
25437+.section .rodata,"a",@progbits
25438 .globl boot_gdt_descr
25439 .globl idt_descr
25440
25441@@ -736,7 +834,7 @@ fault_msg:
25442 .word 0 # 32 bit align gdt_desc.address
25443 boot_gdt_descr:
25444 .word __BOOT_DS+7
25445- .long boot_gdt - __PAGE_OFFSET
25446+ .long pa(boot_gdt)
25447
25448 .word 0 # 32-bit align idt_desc.address
25449 idt_descr:
25450@@ -747,7 +845,7 @@ idt_descr:
25451 .word 0 # 32 bit align gdt_desc.address
25452 ENTRY(early_gdt_descr)
25453 .word GDT_ENTRIES*8-1
25454- .long gdt_page /* Overwritten for secondary CPUs */
25455+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
25456
25457 /*
25458 * The boot_gdt must mirror the equivalent in setup.S and is
25459@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
25460 .align L1_CACHE_BYTES
25461 ENTRY(boot_gdt)
25462 .fill GDT_ENTRY_BOOT_CS,8,0
25463- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
25464- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
25465+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
25466+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
25467+
25468+ .align PAGE_SIZE_asm
25469+ENTRY(cpu_gdt_table)
25470+ .rept NR_CPUS
25471+ .quad 0x0000000000000000 /* NULL descriptor */
25472+ .quad 0x0000000000000000 /* 0x0b reserved */
25473+ .quad 0x0000000000000000 /* 0x13 reserved */
25474+ .quad 0x0000000000000000 /* 0x1b reserved */
25475+
25476+#ifdef CONFIG_PAX_KERNEXEC
25477+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
25478+#else
25479+ .quad 0x0000000000000000 /* 0x20 unused */
25480+#endif
25481+
25482+ .quad 0x0000000000000000 /* 0x28 unused */
25483+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
25484+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
25485+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
25486+ .quad 0x0000000000000000 /* 0x4b reserved */
25487+ .quad 0x0000000000000000 /* 0x53 reserved */
25488+ .quad 0x0000000000000000 /* 0x5b reserved */
25489+
25490+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
25491+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
25492+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
25493+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
25494+
25495+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25496+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25497+
25498+ /*
25499+ * Segments used for calling PnP BIOS have byte granularity.
25500+ * The code segments and data segments have fixed 64k limits,
25501+ * the transfer segment sizes are set at run time.
25502+ */
25503+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
25504+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
25505+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
25506+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
25507+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
25508+
25509+ /*
25510+ * The APM segments have byte granularity and their bases
25511+ * are set at run time. All have 64k limits.
25512+ */
25513+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25514+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25515+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
25516+
25517+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25518+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25519+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25520+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25521+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25522+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25523+
25524+ /* Be sure this is zeroed to avoid false validations in Xen */
25525+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25526+ .endr
25527diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25528index a468c0a..8b5a879 100644
25529--- a/arch/x86/kernel/head_64.S
25530+++ b/arch/x86/kernel/head_64.S
25531@@ -20,6 +20,8 @@
25532 #include <asm/processor-flags.h>
25533 #include <asm/percpu.h>
25534 #include <asm/nops.h>
25535+#include <asm/cpufeature.h>
25536+#include <asm/alternative-asm.h>
25537
25538 #ifdef CONFIG_PARAVIRT
25539 #include <asm/asm-offsets.h>
25540@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25541 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25542 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25543 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25544+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25545+L3_VMALLOC_START = pud_index(VMALLOC_START)
25546+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25547+L3_VMALLOC_END = pud_index(VMALLOC_END)
25548+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25549+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25550
25551 .text
25552 __HEAD
25553@@ -89,11 +97,24 @@ startup_64:
25554 * Fixup the physical addresses in the page table
25555 */
25556 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25557+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25558+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25559+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25560+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25561+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25562
25563- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25564- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25565+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25566+#ifndef CONFIG_XEN
25567+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25568+#endif
25569+
25570+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25571+
25572+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25573+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25574
25575 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25576+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25577
25578 /*
25579 * Set up the identity mapping for the switchover. These
25580@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
25581 * after the boot processor executes this code.
25582 */
25583
25584+ orq $-1, %rbp
25585 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25586 1:
25587
25588- /* Enable PAE mode and PGE */
25589- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25590+ /* Enable PAE mode and PSE/PGE */
25591+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25592 movq %rcx, %cr4
25593
25594 /* Setup early boot stage 4 level pagetables. */
25595@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
25596 movl $MSR_EFER, %ecx
25597 rdmsr
25598 btsl $_EFER_SCE, %eax /* Enable System Call */
25599- btl $20,%edi /* No Execute supported? */
25600+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25601 jnc 1f
25602 btsl $_EFER_NX, %eax
25603+ cmpq $-1, %rbp
25604+ je 1f
25605 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25606+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25607+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25608+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25609+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25610+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25611+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25612+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25613 1: wrmsr /* Make changes effective */
25614
25615 /* Setup cr0 */
25616@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25617 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25618 * address given in m16:64.
25619 */
25620+ pax_set_fptr_mask
25621 movq initial_code(%rip),%rax
25622 pushq $0 # fake return address to stop unwinder
25623 pushq $__KERNEL_CS # set correct cs
25624@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25625 .quad INIT_PER_CPU_VAR(irq_stack_union)
25626
25627 GLOBAL(stack_start)
25628- .quad init_thread_union+THREAD_SIZE-8
25629+ .quad init_thread_union+THREAD_SIZE-16
25630 .word 0
25631 __FINITDATA
25632
25633@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25634 call dump_stack
25635 #ifdef CONFIG_KALLSYMS
25636 leaq early_idt_ripmsg(%rip),%rdi
25637- movq 40(%rsp),%rsi # %rip again
25638+ movq 88(%rsp),%rsi # %rip again
25639 call __print_symbol
25640 #endif
25641 #endif /* EARLY_PRINTK */
25642@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25643 early_recursion_flag:
25644 .long 0
25645
25646+ .section .rodata,"a",@progbits
25647 #ifdef CONFIG_EARLY_PRINTK
25648 early_idt_msg:
25649 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25650@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25651 NEXT_PAGE(early_dynamic_pgts)
25652 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25653
25654- .data
25655+ .section .rodata,"a",@progbits
25656
25657-#ifndef CONFIG_XEN
25658 NEXT_PAGE(init_level4_pgt)
25659- .fill 512,8,0
25660-#else
25661-NEXT_PAGE(init_level4_pgt)
25662- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25663 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25664 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25665+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25666+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25667+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25668+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25669+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25670+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25671 .org init_level4_pgt + L4_START_KERNEL*8, 0
25672 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25673 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25674
25675+#ifdef CONFIG_PAX_PER_CPU_PGD
25676+NEXT_PAGE(cpu_pgd)
25677+ .rept 2*NR_CPUS
25678+ .fill 512,8,0
25679+ .endr
25680+#endif
25681+
25682 NEXT_PAGE(level3_ident_pgt)
25683 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25684+#ifdef CONFIG_XEN
25685 .fill 511, 8, 0
25686+#else
25687+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25688+ .fill 510,8,0
25689+#endif
25690+
25691+NEXT_PAGE(level3_vmalloc_start_pgt)
25692+ .fill 512,8,0
25693+
25694+NEXT_PAGE(level3_vmalloc_end_pgt)
25695+ .fill 512,8,0
25696+
25697+NEXT_PAGE(level3_vmemmap_pgt)
25698+ .fill L3_VMEMMAP_START,8,0
25699+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25700+
25701 NEXT_PAGE(level2_ident_pgt)
25702- /* Since I easily can, map the first 1G.
25703+ /* Since I easily can, map the first 2G.
25704 * Don't set NX because code runs from these pages.
25705 */
25706- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25707-#endif
25708+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25709
25710 NEXT_PAGE(level3_kernel_pgt)
25711 .fill L3_START_KERNEL,8,0
25712@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25713 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25714 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25715
25716+NEXT_PAGE(level2_vmemmap_pgt)
25717+ .fill 512,8,0
25718+
25719 NEXT_PAGE(level2_kernel_pgt)
25720 /*
25721 * 512 MB kernel mapping. We spend a full page on this pagetable
25722@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25723 NEXT_PAGE(level2_fixmap_pgt)
25724 .fill 506,8,0
25725 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25726- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25727- .fill 5,8,0
25728+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25729+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25730+ .fill 4,8,0
25731
25732 NEXT_PAGE(level1_fixmap_pgt)
25733 .fill 512,8,0
25734
25735+NEXT_PAGE(level1_vsyscall_pgt)
25736+ .fill 512,8,0
25737+
25738 #undef PMDS
25739
25740- .data
25741+ .align PAGE_SIZE
25742+ENTRY(cpu_gdt_table)
25743+ .rept NR_CPUS
25744+ .quad 0x0000000000000000 /* NULL descriptor */
25745+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25746+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25747+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25748+ .quad 0x00cffb000000ffff /* __USER32_CS */
25749+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25750+ .quad 0x00affb000000ffff /* __USER_CS */
25751+
25752+#ifdef CONFIG_PAX_KERNEXEC
25753+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25754+#else
25755+ .quad 0x0 /* unused */
25756+#endif
25757+
25758+ .quad 0,0 /* TSS */
25759+ .quad 0,0 /* LDT */
25760+ .quad 0,0,0 /* three TLS descriptors */
25761+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25762+ /* asm/segment.h:GDT_ENTRIES must match this */
25763+
25764+#ifdef CONFIG_PAX_MEMORY_UDEREF
25765+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25766+#else
25767+ .quad 0x0 /* unused */
25768+#endif
25769+
25770+ /* zero the remaining page */
25771+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25772+ .endr
25773+
25774 .align 16
25775 .globl early_gdt_descr
25776 early_gdt_descr:
25777 .word GDT_ENTRIES*8-1
25778 early_gdt_descr_base:
25779- .quad INIT_PER_CPU_VAR(gdt_page)
25780+ .quad cpu_gdt_table
25781
25782 ENTRY(phys_base)
25783 /* This must match the first entry in level2_kernel_pgt */
25784 .quad 0x0000000000000000
25785
25786 #include "../../x86/xen/xen-head.S"
25787-
25788- __PAGE_ALIGNED_BSS
25789+
25790+ .section .rodata,"a",@progbits
25791 NEXT_PAGE(empty_zero_page)
25792 .skip PAGE_SIZE
25793diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25794index 05fd74f..c3548b1 100644
25795--- a/arch/x86/kernel/i386_ksyms_32.c
25796+++ b/arch/x86/kernel/i386_ksyms_32.c
25797@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25798 EXPORT_SYMBOL(cmpxchg8b_emu);
25799 #endif
25800
25801+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25802+
25803 /* Networking helper routines. */
25804 EXPORT_SYMBOL(csum_partial_copy_generic);
25805+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25806+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25807
25808 EXPORT_SYMBOL(__get_user_1);
25809 EXPORT_SYMBOL(__get_user_2);
25810@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25811 EXPORT_SYMBOL(___preempt_schedule_context);
25812 #endif
25813 #endif
25814+
25815+#ifdef CONFIG_PAX_KERNEXEC
25816+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25817+#endif
25818+
25819+#ifdef CONFIG_PAX_PER_CPU_PGD
25820+EXPORT_SYMBOL(cpu_pgd);
25821+#endif
25822diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25823index a9a4229..6f4d476 100644
25824--- a/arch/x86/kernel/i387.c
25825+++ b/arch/x86/kernel/i387.c
25826@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25827 static inline bool interrupted_user_mode(void)
25828 {
25829 struct pt_regs *regs = get_irq_regs();
25830- return regs && user_mode_vm(regs);
25831+ return regs && user_mode(regs);
25832 }
25833
25834 /*
25835diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25836index 8af8171..f8c1169 100644
25837--- a/arch/x86/kernel/i8259.c
25838+++ b/arch/x86/kernel/i8259.c
25839@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25840 static void make_8259A_irq(unsigned int irq)
25841 {
25842 disable_irq_nosync(irq);
25843- io_apic_irqs &= ~(1<<irq);
25844+ io_apic_irqs &= ~(1UL<<irq);
25845 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25846 i8259A_chip.name);
25847 enable_irq(irq);
25848@@ -209,7 +209,7 @@ spurious_8259A_irq:
25849 "spurious 8259A interrupt: IRQ%d.\n", irq);
25850 spurious_irq_mask |= irqmask;
25851 }
25852- atomic_inc(&irq_err_count);
25853+ atomic_inc_unchecked(&irq_err_count);
25854 /*
25855 * Theoretically we do not have to handle this IRQ,
25856 * but in Linux this does not cause problems and is
25857@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25858 /* (slave's support for AEOI in flat mode is to be investigated) */
25859 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25860
25861+ pax_open_kernel();
25862 if (auto_eoi)
25863 /*
25864 * In AEOI mode we just have to mask the interrupt
25865 * when acking.
25866 */
25867- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25868+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25869 else
25870- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25871+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25872+ pax_close_kernel();
25873
25874 udelay(100); /* wait for 8259A to initialize */
25875
25876diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25877index a979b5b..1d6db75 100644
25878--- a/arch/x86/kernel/io_delay.c
25879+++ b/arch/x86/kernel/io_delay.c
25880@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25881 * Quirk table for systems that misbehave (lock up, etc.) if port
25882 * 0x80 is used:
25883 */
25884-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25885+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25886 {
25887 .callback = dmi_io_delay_0xed_port,
25888 .ident = "Compaq Presario V6000",
25889diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25890index 4ddaf66..49d5c18 100644
25891--- a/arch/x86/kernel/ioport.c
25892+++ b/arch/x86/kernel/ioport.c
25893@@ -6,6 +6,7 @@
25894 #include <linux/sched.h>
25895 #include <linux/kernel.h>
25896 #include <linux/capability.h>
25897+#include <linux/security.h>
25898 #include <linux/errno.h>
25899 #include <linux/types.h>
25900 #include <linux/ioport.h>
25901@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25902 return -EINVAL;
25903 if (turn_on && !capable(CAP_SYS_RAWIO))
25904 return -EPERM;
25905+#ifdef CONFIG_GRKERNSEC_IO
25906+ if (turn_on && grsec_disable_privio) {
25907+ gr_handle_ioperm();
25908+ return -ENODEV;
25909+ }
25910+#endif
25911
25912 /*
25913 * If it's the first ioperm() call in this thread's lifetime, set the
25914@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25915 * because the ->io_bitmap_max value must match the bitmap
25916 * contents:
25917 */
25918- tss = &per_cpu(init_tss, get_cpu());
25919+ tss = init_tss + get_cpu();
25920
25921 if (turn_on)
25922 bitmap_clear(t->io_bitmap_ptr, from, num);
25923@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25924 if (level > old) {
25925 if (!capable(CAP_SYS_RAWIO))
25926 return -EPERM;
25927+#ifdef CONFIG_GRKERNSEC_IO
25928+ if (grsec_disable_privio) {
25929+ gr_handle_iopl();
25930+ return -ENODEV;
25931+ }
25932+#endif
25933 }
25934 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25935 t->iopl = level << 12;
25936diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25937index 922d285..6d20692 100644
25938--- a/arch/x86/kernel/irq.c
25939+++ b/arch/x86/kernel/irq.c
25940@@ -22,7 +22,7 @@
25941 #define CREATE_TRACE_POINTS
25942 #include <asm/trace/irq_vectors.h>
25943
25944-atomic_t irq_err_count;
25945+atomic_unchecked_t irq_err_count;
25946
25947 /* Function pointer for generic interrupt vector handling */
25948 void (*x86_platform_ipi_callback)(void) = NULL;
25949@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25950 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25951 seq_printf(p, " Hypervisor callback interrupts\n");
25952 #endif
25953- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25954+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25955 #if defined(CONFIG_X86_IO_APIC)
25956- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25957+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25958 #endif
25959 return 0;
25960 }
25961@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25962
25963 u64 arch_irq_stat(void)
25964 {
25965- u64 sum = atomic_read(&irq_err_count);
25966+ u64 sum = atomic_read_unchecked(&irq_err_count);
25967 return sum;
25968 }
25969
25970diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25971index 63ce838..2ea3e06 100644
25972--- a/arch/x86/kernel/irq_32.c
25973+++ b/arch/x86/kernel/irq_32.c
25974@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25975
25976 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25977
25978+extern void gr_handle_kernel_exploit(void);
25979+
25980 int sysctl_panic_on_stackoverflow __read_mostly;
25981
25982 /* Debugging check for stack overflow: is there less than 1KB free? */
25983@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25984 __asm__ __volatile__("andl %%esp,%0" :
25985 "=r" (sp) : "0" (THREAD_SIZE - 1));
25986
25987- return sp < (sizeof(struct thread_info) + STACK_WARN);
25988+ return sp < STACK_WARN;
25989 }
25990
25991 static void print_stack_overflow(void)
25992 {
25993 printk(KERN_WARNING "low stack detected by irq handler\n");
25994 dump_stack();
25995+ gr_handle_kernel_exploit();
25996 if (sysctl_panic_on_stackoverflow)
25997 panic("low stack detected by irq handler - check messages\n");
25998 }
25999@@ -84,10 +87,9 @@ static inline void *current_stack(void)
26000 static inline int
26001 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26002 {
26003- struct irq_stack *curstk, *irqstk;
26004+ struct irq_stack *irqstk;
26005 u32 *isp, *prev_esp, arg1, arg2;
26006
26007- curstk = (struct irq_stack *) current_stack();
26008 irqstk = __this_cpu_read(hardirq_stack);
26009
26010 /*
26011@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26012 * handler) we can't do that and just have to keep using the
26013 * current stack (which is the irq stack already after all)
26014 */
26015- if (unlikely(curstk == irqstk))
26016+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
26017 return 0;
26018
26019- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
26020+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
26021
26022 /* Save the next esp at the bottom of the stack */
26023 prev_esp = (u32 *)irqstk;
26024 *prev_esp = current_stack_pointer;
26025
26026+#ifdef CONFIG_PAX_MEMORY_UDEREF
26027+ __set_fs(MAKE_MM_SEG(0));
26028+#endif
26029+
26030 if (unlikely(overflow))
26031 call_on_stack(print_stack_overflow, isp);
26032
26033@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26034 : "0" (irq), "1" (desc), "2" (isp),
26035 "D" (desc->handle_irq)
26036 : "memory", "cc", "ecx");
26037+
26038+#ifdef CONFIG_PAX_MEMORY_UDEREF
26039+ __set_fs(current_thread_info()->addr_limit);
26040+#endif
26041+
26042 return 1;
26043 }
26044
26045@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26046 */
26047 void irq_ctx_init(int cpu)
26048 {
26049- struct irq_stack *irqstk;
26050-
26051 if (per_cpu(hardirq_stack, cpu))
26052 return;
26053
26054- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26055- THREADINFO_GFP,
26056- THREAD_SIZE_ORDER));
26057- per_cpu(hardirq_stack, cpu) = irqstk;
26058-
26059- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26060- THREADINFO_GFP,
26061- THREAD_SIZE_ORDER));
26062- per_cpu(softirq_stack, cpu) = irqstk;
26063-
26064- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
26065- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
26066+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26067+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26068 }
26069
26070 void do_softirq_own_stack(void)
26071 {
26072- struct thread_info *curstk;
26073 struct irq_stack *irqstk;
26074 u32 *isp, *prev_esp;
26075
26076- curstk = current_stack();
26077 irqstk = __this_cpu_read(softirq_stack);
26078
26079 /* build the stack frame on the softirq stack */
26080@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
26081 prev_esp = (u32 *)irqstk;
26082 *prev_esp = current_stack_pointer;
26083
26084+#ifdef CONFIG_PAX_MEMORY_UDEREF
26085+ __set_fs(MAKE_MM_SEG(0));
26086+#endif
26087+
26088 call_on_stack(__do_softirq, isp);
26089+
26090+#ifdef CONFIG_PAX_MEMORY_UDEREF
26091+ __set_fs(current_thread_info()->addr_limit);
26092+#endif
26093+
26094 }
26095
26096 bool handle_irq(unsigned irq, struct pt_regs *regs)
26097@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
26098 if (unlikely(!desc))
26099 return false;
26100
26101- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26102+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26103 if (unlikely(overflow))
26104 print_stack_overflow();
26105 desc->handle_irq(irq, desc);
26106diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
26107index 4d1c746..55a22d6 100644
26108--- a/arch/x86/kernel/irq_64.c
26109+++ b/arch/x86/kernel/irq_64.c
26110@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
26111 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
26112 EXPORT_PER_CPU_SYMBOL(irq_regs);
26113
26114+extern void gr_handle_kernel_exploit(void);
26115+
26116 int sysctl_panic_on_stackoverflow;
26117
26118 /*
26119@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26120 u64 estack_top, estack_bottom;
26121 u64 curbase = (u64)task_stack_page(current);
26122
26123- if (user_mode_vm(regs))
26124+ if (user_mode(regs))
26125 return;
26126
26127 if (regs->sp >= curbase + sizeof(struct thread_info) +
26128@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26129 irq_stack_top, irq_stack_bottom,
26130 estack_top, estack_bottom);
26131
26132+ gr_handle_kernel_exploit();
26133+
26134 if (sysctl_panic_on_stackoverflow)
26135 panic("low stack detected by irq handler - check messages\n");
26136 #endif
26137diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
26138index 26d5a55..a01160a 100644
26139--- a/arch/x86/kernel/jump_label.c
26140+++ b/arch/x86/kernel/jump_label.c
26141@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26142 * Jump label is enabled for the first time.
26143 * So we expect a default_nop...
26144 */
26145- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
26146+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
26147 != 0))
26148 bug_at((void *)entry->code, __LINE__);
26149 } else {
26150@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26151 * ...otherwise expect an ideal_nop. Otherwise
26152 * something went horribly wrong.
26153 */
26154- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
26155+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
26156 != 0))
26157 bug_at((void *)entry->code, __LINE__);
26158 }
26159@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
26160 * are converting the default nop to the ideal nop.
26161 */
26162 if (init) {
26163- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
26164+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
26165 bug_at((void *)entry->code, __LINE__);
26166 } else {
26167 code.jump = 0xe9;
26168 code.offset = entry->target -
26169 (entry->code + JUMP_LABEL_NOP_SIZE);
26170- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
26171+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
26172 bug_at((void *)entry->code, __LINE__);
26173 }
26174 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
26175diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
26176index 7ec1d5f..5a7d130 100644
26177--- a/arch/x86/kernel/kgdb.c
26178+++ b/arch/x86/kernel/kgdb.c
26179@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
26180 #ifdef CONFIG_X86_32
26181 switch (regno) {
26182 case GDB_SS:
26183- if (!user_mode_vm(regs))
26184+ if (!user_mode(regs))
26185 *(unsigned long *)mem = __KERNEL_DS;
26186 break;
26187 case GDB_SP:
26188- if (!user_mode_vm(regs))
26189+ if (!user_mode(regs))
26190 *(unsigned long *)mem = kernel_stack_pointer(regs);
26191 break;
26192 case GDB_GS:
26193@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
26194 bp->attr.bp_addr = breakinfo[breakno].addr;
26195 bp->attr.bp_len = breakinfo[breakno].len;
26196 bp->attr.bp_type = breakinfo[breakno].type;
26197- info->address = breakinfo[breakno].addr;
26198+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
26199+ info->address = ktla_ktva(breakinfo[breakno].addr);
26200+ else
26201+ info->address = breakinfo[breakno].addr;
26202 info->len = breakinfo[breakno].len;
26203 info->type = breakinfo[breakno].type;
26204 val = arch_install_hw_breakpoint(bp);
26205@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
26206 case 'k':
26207 /* clear the trace bit */
26208 linux_regs->flags &= ~X86_EFLAGS_TF;
26209- atomic_set(&kgdb_cpu_doing_single_step, -1);
26210+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
26211
26212 /* set the trace bit if we're stepping */
26213 if (remcomInBuffer[0] == 's') {
26214 linux_regs->flags |= X86_EFLAGS_TF;
26215- atomic_set(&kgdb_cpu_doing_single_step,
26216+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
26217 raw_smp_processor_id());
26218 }
26219
26220@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
26221
26222 switch (cmd) {
26223 case DIE_DEBUG:
26224- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
26225+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
26226 if (user_mode(regs))
26227 return single_step_cont(regs, args);
26228 break;
26229@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26230 #endif /* CONFIG_DEBUG_RODATA */
26231
26232 bpt->type = BP_BREAKPOINT;
26233- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
26234+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
26235 BREAK_INSTR_SIZE);
26236 if (err)
26237 return err;
26238- err = probe_kernel_write((char *)bpt->bpt_addr,
26239+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26240 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
26241 #ifdef CONFIG_DEBUG_RODATA
26242 if (!err)
26243@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26244 return -EBUSY;
26245 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
26246 BREAK_INSTR_SIZE);
26247- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26248+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26249 if (err)
26250 return err;
26251 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
26252@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
26253 if (mutex_is_locked(&text_mutex))
26254 goto knl_write;
26255 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
26256- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26257+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26258 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
26259 goto knl_write;
26260 return err;
26261 knl_write:
26262 #endif /* CONFIG_DEBUG_RODATA */
26263- return probe_kernel_write((char *)bpt->bpt_addr,
26264+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26265 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
26266 }
26267
26268diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
26269index 67e6d19..731ed28 100644
26270--- a/arch/x86/kernel/kprobes/core.c
26271+++ b/arch/x86/kernel/kprobes/core.c
26272@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
26273 s32 raddr;
26274 } __packed *insn;
26275
26276- insn = (struct __arch_relative_insn *)from;
26277+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
26278+
26279+ pax_open_kernel();
26280 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
26281 insn->op = op;
26282+ pax_close_kernel();
26283 }
26284
26285 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
26286@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
26287 kprobe_opcode_t opcode;
26288 kprobe_opcode_t *orig_opcodes = opcodes;
26289
26290- if (search_exception_tables((unsigned long)opcodes))
26291+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
26292 return 0; /* Page fault may occur on this address. */
26293
26294 retry:
26295@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
26296 * for the first byte, we can recover the original instruction
26297 * from it and kp->opcode.
26298 */
26299- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26300+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26301 buf[0] = kp->opcode;
26302- return (unsigned long)buf;
26303+ return ktva_ktla((unsigned long)buf);
26304 }
26305
26306 /*
26307@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26308 /* Another subsystem puts a breakpoint, failed to recover */
26309 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
26310 return 0;
26311+ pax_open_kernel();
26312 memcpy(dest, insn.kaddr, insn.length);
26313+ pax_close_kernel();
26314
26315 #ifdef CONFIG_X86_64
26316 if (insn_rip_relative(&insn)) {
26317@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26318 return 0;
26319 }
26320 disp = (u8 *) dest + insn_offset_displacement(&insn);
26321+ pax_open_kernel();
26322 *(s32 *) disp = (s32) newdisp;
26323+ pax_close_kernel();
26324 }
26325 #endif
26326 return insn.length;
26327@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26328 * nor set current_kprobe, because it doesn't use single
26329 * stepping.
26330 */
26331- regs->ip = (unsigned long)p->ainsn.insn;
26332+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26333 preempt_enable_no_resched();
26334 return;
26335 }
26336@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26337 regs->flags &= ~X86_EFLAGS_IF;
26338 /* single step inline if the instruction is an int3 */
26339 if (p->opcode == BREAKPOINT_INSTRUCTION)
26340- regs->ip = (unsigned long)p->addr;
26341+ regs->ip = ktla_ktva((unsigned long)p->addr);
26342 else
26343- regs->ip = (unsigned long)p->ainsn.insn;
26344+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26345 }
26346 NOKPROBE_SYMBOL(setup_singlestep);
26347
26348@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26349 struct kprobe *p;
26350 struct kprobe_ctlblk *kcb;
26351
26352- if (user_mode_vm(regs))
26353+ if (user_mode(regs))
26354 return 0;
26355
26356 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
26357@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26358 setup_singlestep(p, regs, kcb, 0);
26359 return 1;
26360 }
26361- } else if (*addr != BREAKPOINT_INSTRUCTION) {
26362+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
26363 /*
26364 * The breakpoint instruction was removed right
26365 * after we hit it. Another cpu has removed
26366@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
26367 " movq %rax, 152(%rsp)\n"
26368 RESTORE_REGS_STRING
26369 " popfq\n"
26370+#ifdef KERNEXEC_PLUGIN
26371+ " btsq $63,(%rsp)\n"
26372+#endif
26373 #else
26374 " pushf\n"
26375 SAVE_REGS_STRING
26376@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
26377 struct kprobe_ctlblk *kcb)
26378 {
26379 unsigned long *tos = stack_addr(regs);
26380- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
26381+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
26382 unsigned long orig_ip = (unsigned long)p->addr;
26383 kprobe_opcode_t *insn = p->ainsn.insn;
26384
26385@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
26386 struct die_args *args = data;
26387 int ret = NOTIFY_DONE;
26388
26389- if (args->regs && user_mode_vm(args->regs))
26390+ if (args->regs && user_mode(args->regs))
26391 return ret;
26392
26393 if (val == DIE_GPF) {
26394diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
26395index f1314d0..15f3154 100644
26396--- a/arch/x86/kernel/kprobes/opt.c
26397+++ b/arch/x86/kernel/kprobes/opt.c
26398@@ -79,6 +79,7 @@ found:
26399 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
26400 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26401 {
26402+ pax_open_kernel();
26403 #ifdef CONFIG_X86_64
26404 *addr++ = 0x48;
26405 *addr++ = 0xbf;
26406@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26407 *addr++ = 0xb8;
26408 #endif
26409 *(unsigned long *)addr = val;
26410+ pax_close_kernel();
26411 }
26412
26413 asm (
26414@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26415 * Verify if the address gap is in 2GB range, because this uses
26416 * a relative jump.
26417 */
26418- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
26419+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
26420 if (abs(rel) > 0x7fffffff) {
26421 __arch_remove_optimized_kprobe(op, 0);
26422 return -ERANGE;
26423@@ -354,16 +356,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26424 op->optinsn.size = ret;
26425
26426 /* Copy arch-dep-instance from template */
26427- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
26428+ pax_open_kernel();
26429+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
26430+ pax_close_kernel();
26431
26432 /* Set probe information */
26433 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
26434
26435 /* Set probe function call */
26436- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
26437+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
26438
26439 /* Set returning jmp instruction at the tail of out-of-line buffer */
26440- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
26441+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
26442 (u8 *)op->kp.addr + op->optinsn.size);
26443
26444 flush_icache_range((unsigned long) buf,
26445@@ -388,7 +392,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
26446 WARN_ON(kprobe_disabled(&op->kp));
26447
26448 /* Backup instructions which will be replaced by jump address */
26449- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
26450+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
26451 RELATIVE_ADDR_SIZE);
26452
26453 insn_buf[0] = RELATIVEJUMP_OPCODE;
26454@@ -436,7 +440,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
26455 /* This kprobe is really able to run optimized path. */
26456 op = container_of(p, struct optimized_kprobe, kp);
26457 /* Detour through copied instructions */
26458- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
26459+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
26460 if (!reenter)
26461 reset_current_kprobe();
26462 preempt_enable_no_resched();
26463diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
26464index c2bedae..25e7ab60 100644
26465--- a/arch/x86/kernel/ksysfs.c
26466+++ b/arch/x86/kernel/ksysfs.c
26467@@ -184,7 +184,7 @@ out:
26468
26469 static struct kobj_attribute type_attr = __ATTR_RO(type);
26470
26471-static struct bin_attribute data_attr = {
26472+static bin_attribute_no_const data_attr __read_only = {
26473 .attr = {
26474 .name = "data",
26475 .mode = S_IRUGO,
26476diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
26477index c37886d..d851d32 100644
26478--- a/arch/x86/kernel/ldt.c
26479+++ b/arch/x86/kernel/ldt.c
26480@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
26481 if (reload) {
26482 #ifdef CONFIG_SMP
26483 preempt_disable();
26484- load_LDT(pc);
26485+ load_LDT_nolock(pc);
26486 if (!cpumask_equal(mm_cpumask(current->mm),
26487 cpumask_of(smp_processor_id())))
26488 smp_call_function(flush_ldt, current->mm, 1);
26489 preempt_enable();
26490 #else
26491- load_LDT(pc);
26492+ load_LDT_nolock(pc);
26493 #endif
26494 }
26495 if (oldsize) {
26496@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26497 return err;
26498
26499 for (i = 0; i < old->size; i++)
26500- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26501+ write_ldt_entry(new->ldt, i, old->ldt + i);
26502 return 0;
26503 }
26504
26505@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26506 retval = copy_ldt(&mm->context, &old_mm->context);
26507 mutex_unlock(&old_mm->context.lock);
26508 }
26509+
26510+ if (tsk == current) {
26511+ mm->context.vdso = 0;
26512+
26513+#ifdef CONFIG_X86_32
26514+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26515+ mm->context.user_cs_base = 0UL;
26516+ mm->context.user_cs_limit = ~0UL;
26517+
26518+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26519+ cpus_clear(mm->context.cpu_user_cs_mask);
26520+#endif
26521+
26522+#endif
26523+#endif
26524+
26525+ }
26526+
26527 return retval;
26528 }
26529
26530@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26531 }
26532 }
26533
26534+#ifdef CONFIG_PAX_SEGMEXEC
26535+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26536+ error = -EINVAL;
26537+ goto out_unlock;
26538+ }
26539+#endif
26540+
26541 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26542 error = -EINVAL;
26543 goto out_unlock;
26544diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26545index 1667b1d..16492c5 100644
26546--- a/arch/x86/kernel/machine_kexec_32.c
26547+++ b/arch/x86/kernel/machine_kexec_32.c
26548@@ -25,7 +25,7 @@
26549 #include <asm/cacheflush.h>
26550 #include <asm/debugreg.h>
26551
26552-static void set_idt(void *newidt, __u16 limit)
26553+static void set_idt(struct desc_struct *newidt, __u16 limit)
26554 {
26555 struct desc_ptr curidt;
26556
26557@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26558 }
26559
26560
26561-static void set_gdt(void *newgdt, __u16 limit)
26562+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26563 {
26564 struct desc_ptr curgdt;
26565
26566@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26567 }
26568
26569 control_page = page_address(image->control_code_page);
26570- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26571+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26572
26573 relocate_kernel_ptr = control_page;
26574 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26575diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26576index c73aecf..4c63630 100644
26577--- a/arch/x86/kernel/mcount_64.S
26578+++ b/arch/x86/kernel/mcount_64.S
26579@@ -7,7 +7,7 @@
26580 #include <linux/linkage.h>
26581 #include <asm/ptrace.h>
26582 #include <asm/ftrace.h>
26583-
26584+#include <asm/alternative-asm.h>
26585
26586 .code64
26587 .section .entry.text, "ax"
26588@@ -24,8 +24,9 @@
26589 #ifdef CONFIG_DYNAMIC_FTRACE
26590
26591 ENTRY(function_hook)
26592+ pax_force_retaddr
26593 retq
26594-END(function_hook)
26595+ENDPROC(function_hook)
26596
26597 /* skip is set if stack has been adjusted */
26598 .macro ftrace_caller_setup skip=0
26599@@ -62,8 +63,9 @@ GLOBAL(ftrace_graph_call)
26600 #endif
26601
26602 GLOBAL(ftrace_stub)
26603+ pax_force_retaddr
26604 retq
26605-END(ftrace_caller)
26606+ENDPROC(ftrace_caller)
26607
26608 ENTRY(ftrace_regs_caller)
26609 /* Save the current flags before compare (in SS location)*/
26610@@ -127,7 +129,7 @@ GLOBAL(ftrace_regs_call)
26611 popfq
26612 jmp ftrace_stub
26613
26614-END(ftrace_regs_caller)
26615+ENDPROC(ftrace_regs_caller)
26616
26617
26618 #else /* ! CONFIG_DYNAMIC_FTRACE */
26619@@ -145,6 +147,7 @@ ENTRY(function_hook)
26620 #endif
26621
26622 GLOBAL(ftrace_stub)
26623+ pax_force_retaddr
26624 retq
26625
26626 trace:
26627@@ -158,12 +161,13 @@ trace:
26628 #endif
26629 subq $MCOUNT_INSN_SIZE, %rdi
26630
26631+ pax_force_fptr ftrace_trace_function
26632 call *ftrace_trace_function
26633
26634 MCOUNT_RESTORE_FRAME
26635
26636 jmp ftrace_stub
26637-END(function_hook)
26638+ENDPROC(function_hook)
26639 #endif /* CONFIG_DYNAMIC_FTRACE */
26640 #endif /* CONFIG_FUNCTION_TRACER */
26641
26642@@ -185,8 +189,9 @@ ENTRY(ftrace_graph_caller)
26643
26644 MCOUNT_RESTORE_FRAME
26645
26646+ pax_force_retaddr
26647 retq
26648-END(ftrace_graph_caller)
26649+ENDPROC(ftrace_graph_caller)
26650
26651 GLOBAL(return_to_handler)
26652 subq $24, %rsp
26653@@ -202,5 +207,7 @@ GLOBAL(return_to_handler)
26654 movq 8(%rsp), %rdx
26655 movq (%rsp), %rax
26656 addq $24, %rsp
26657+ pax_force_fptr %rdi
26658 jmp *%rdi
26659+ENDPROC(return_to_handler)
26660 #endif
26661diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26662index e69f988..da078ea 100644
26663--- a/arch/x86/kernel/module.c
26664+++ b/arch/x86/kernel/module.c
26665@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26666 }
26667 #endif
26668
26669-void *module_alloc(unsigned long size)
26670+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26671 {
26672- if (PAGE_ALIGN(size) > MODULES_LEN)
26673+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26674 return NULL;
26675 return __vmalloc_node_range(size, 1,
26676 MODULES_VADDR + get_module_load_offset(),
26677- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26678- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26679+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26680+ prot, NUMA_NO_NODE,
26681 __builtin_return_address(0));
26682 }
26683
26684+void *module_alloc(unsigned long size)
26685+{
26686+
26687+#ifdef CONFIG_PAX_KERNEXEC
26688+ return __module_alloc(size, PAGE_KERNEL);
26689+#else
26690+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26691+#endif
26692+
26693+}
26694+
26695+#ifdef CONFIG_PAX_KERNEXEC
26696+#ifdef CONFIG_X86_32
26697+void *module_alloc_exec(unsigned long size)
26698+{
26699+ struct vm_struct *area;
26700+
26701+ if (size == 0)
26702+ return NULL;
26703+
26704+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26705+return area ? area->addr : NULL;
26706+}
26707+EXPORT_SYMBOL(module_alloc_exec);
26708+
26709+void module_free_exec(struct module *mod, void *module_region)
26710+{
26711+ vunmap(module_region);
26712+}
26713+EXPORT_SYMBOL(module_free_exec);
26714+#else
26715+void module_free_exec(struct module *mod, void *module_region)
26716+{
26717+ module_free(mod, module_region);
26718+}
26719+EXPORT_SYMBOL(module_free_exec);
26720+
26721+void *module_alloc_exec(unsigned long size)
26722+{
26723+ return __module_alloc(size, PAGE_KERNEL_RX);
26724+}
26725+EXPORT_SYMBOL(module_alloc_exec);
26726+#endif
26727+#endif
26728+
26729 #ifdef CONFIG_X86_32
26730 int apply_relocate(Elf32_Shdr *sechdrs,
26731 const char *strtab,
26732@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26733 unsigned int i;
26734 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26735 Elf32_Sym *sym;
26736- uint32_t *location;
26737+ uint32_t *plocation, location;
26738
26739 DEBUGP("Applying relocate section %u to %u\n",
26740 relsec, sechdrs[relsec].sh_info);
26741 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26742 /* This is where to make the change */
26743- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26744- + rel[i].r_offset;
26745+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26746+ location = (uint32_t)plocation;
26747+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26748+ plocation = ktla_ktva((void *)plocation);
26749 /* This is the symbol it is referring to. Note that all
26750 undefined symbols have been resolved. */
26751 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26752@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26753 switch (ELF32_R_TYPE(rel[i].r_info)) {
26754 case R_386_32:
26755 /* We add the value into the location given */
26756- *location += sym->st_value;
26757+ pax_open_kernel();
26758+ *plocation += sym->st_value;
26759+ pax_close_kernel();
26760 break;
26761 case R_386_PC32:
26762 /* Add the value, subtract its position */
26763- *location += sym->st_value - (uint32_t)location;
26764+ pax_open_kernel();
26765+ *plocation += sym->st_value - location;
26766+ pax_close_kernel();
26767 break;
26768 default:
26769 pr_err("%s: Unknown relocation: %u\n",
26770@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26771 case R_X86_64_NONE:
26772 break;
26773 case R_X86_64_64:
26774+ pax_open_kernel();
26775 *(u64 *)loc = val;
26776+ pax_close_kernel();
26777 break;
26778 case R_X86_64_32:
26779+ pax_open_kernel();
26780 *(u32 *)loc = val;
26781+ pax_close_kernel();
26782 if (val != *(u32 *)loc)
26783 goto overflow;
26784 break;
26785 case R_X86_64_32S:
26786+ pax_open_kernel();
26787 *(s32 *)loc = val;
26788+ pax_close_kernel();
26789 if ((s64)val != *(s32 *)loc)
26790 goto overflow;
26791 break;
26792 case R_X86_64_PC32:
26793 val -= (u64)loc;
26794+ pax_open_kernel();
26795 *(u32 *)loc = val;
26796+ pax_close_kernel();
26797+
26798 #if 0
26799 if ((s64)val != *(s32 *)loc)
26800 goto overflow;
26801diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26802index c9603ac..9f88728 100644
26803--- a/arch/x86/kernel/msr.c
26804+++ b/arch/x86/kernel/msr.c
26805@@ -37,6 +37,7 @@
26806 #include <linux/notifier.h>
26807 #include <linux/uaccess.h>
26808 #include <linux/gfp.h>
26809+#include <linux/grsecurity.h>
26810
26811 #include <asm/processor.h>
26812 #include <asm/msr.h>
26813@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26814 int err = 0;
26815 ssize_t bytes = 0;
26816
26817+#ifdef CONFIG_GRKERNSEC_KMEM
26818+ gr_handle_msr_write();
26819+ return -EPERM;
26820+#endif
26821+
26822 if (count % 8)
26823 return -EINVAL; /* Invalid chunk size */
26824
26825@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26826 err = -EBADF;
26827 break;
26828 }
26829+#ifdef CONFIG_GRKERNSEC_KMEM
26830+ gr_handle_msr_write();
26831+ return -EPERM;
26832+#endif
26833 if (copy_from_user(&regs, uregs, sizeof regs)) {
26834 err = -EFAULT;
26835 break;
26836@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26837 return notifier_from_errno(err);
26838 }
26839
26840-static struct notifier_block __refdata msr_class_cpu_notifier = {
26841+static struct notifier_block msr_class_cpu_notifier = {
26842 .notifier_call = msr_class_cpu_callback,
26843 };
26844
26845diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26846index c3e985d..110a36a 100644
26847--- a/arch/x86/kernel/nmi.c
26848+++ b/arch/x86/kernel/nmi.c
26849@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26850
26851 static void nmi_max_handler(struct irq_work *w)
26852 {
26853- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26854+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26855 int remainder_ns, decimal_msecs;
26856- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26857+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26858
26859 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26860 decimal_msecs = remainder_ns / 1000;
26861
26862 printk_ratelimited(KERN_INFO
26863 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26864- a->handler, whole_msecs, decimal_msecs);
26865+ n->action->handler, whole_msecs, decimal_msecs);
26866 }
26867
26868 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26869@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26870 delta = sched_clock() - delta;
26871 trace_nmi_handler(a->handler, (int)delta, thishandled);
26872
26873- if (delta < nmi_longest_ns || delta < a->max_duration)
26874+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26875 continue;
26876
26877- a->max_duration = delta;
26878- irq_work_queue(&a->irq_work);
26879+ a->work->max_duration = delta;
26880+ irq_work_queue(&a->work->irq_work);
26881 }
26882
26883 rcu_read_unlock();
26884@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26885 }
26886 NOKPROBE_SYMBOL(nmi_handle);
26887
26888-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26889+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26890 {
26891 struct nmi_desc *desc = nmi_to_desc(type);
26892 unsigned long flags;
26893@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26894 if (!action->handler)
26895 return -EINVAL;
26896
26897- init_irq_work(&action->irq_work, nmi_max_handler);
26898+ action->work->action = action;
26899+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26900
26901 spin_lock_irqsave(&desc->lock, flags);
26902
26903@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26904 * event confuses some handlers (kdump uses this flag)
26905 */
26906 if (action->flags & NMI_FLAG_FIRST)
26907- list_add_rcu(&action->list, &desc->head);
26908+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26909 else
26910- list_add_tail_rcu(&action->list, &desc->head);
26911+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26912
26913 spin_unlock_irqrestore(&desc->lock, flags);
26914 return 0;
26915@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26916 if (!strcmp(n->name, name)) {
26917 WARN(in_nmi(),
26918 "Trying to free NMI (%s) from NMI context!\n", n->name);
26919- list_del_rcu(&n->list);
26920+ pax_list_del_rcu((struct list_head *)&n->list);
26921 break;
26922 }
26923 }
26924@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26925 dotraplinkage notrace void
26926 do_nmi(struct pt_regs *regs, long error_code)
26927 {
26928+
26929+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26930+ if (!user_mode(regs)) {
26931+ unsigned long cs = regs->cs & 0xFFFF;
26932+ unsigned long ip = ktva_ktla(regs->ip);
26933+
26934+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26935+ regs->ip = ip;
26936+ }
26937+#endif
26938+
26939 nmi_nesting_preprocess(regs);
26940
26941 nmi_enter();
26942diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26943index 6d9582e..f746287 100644
26944--- a/arch/x86/kernel/nmi_selftest.c
26945+++ b/arch/x86/kernel/nmi_selftest.c
26946@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26947 {
26948 /* trap all the unknown NMIs we may generate */
26949 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26950- __initdata);
26951+ __initconst);
26952 }
26953
26954 static void __init cleanup_nmi_testsuite(void)
26955@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26956 unsigned long timeout;
26957
26958 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26959- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26960+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26961 nmi_fail = FAILURE;
26962 return;
26963 }
26964diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26965index bbb6c73..24a58ef 100644
26966--- a/arch/x86/kernel/paravirt-spinlocks.c
26967+++ b/arch/x86/kernel/paravirt-spinlocks.c
26968@@ -8,7 +8,7 @@
26969
26970 #include <asm/paravirt.h>
26971
26972-struct pv_lock_ops pv_lock_ops = {
26973+struct pv_lock_ops pv_lock_ops __read_only = {
26974 #ifdef CONFIG_SMP
26975 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26976 .unlock_kick = paravirt_nop,
26977diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26978index 548d25f..f8fb99c 100644
26979--- a/arch/x86/kernel/paravirt.c
26980+++ b/arch/x86/kernel/paravirt.c
26981@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26982 {
26983 return x;
26984 }
26985+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26986+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26987+#endif
26988
26989 void __init default_banner(void)
26990 {
26991@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26992
26993 if (opfunc == NULL)
26994 /* If there's no function, patch it with a ud2a (BUG) */
26995- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26996- else if (opfunc == _paravirt_nop)
26997+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26998+ else if (opfunc == (void *)_paravirt_nop)
26999 /* If the operation is a nop, then nop the callsite */
27000 ret = paravirt_patch_nop();
27001
27002 /* identity functions just return their single argument */
27003- else if (opfunc == _paravirt_ident_32)
27004+ else if (opfunc == (void *)_paravirt_ident_32)
27005 ret = paravirt_patch_ident_32(insnbuf, len);
27006- else if (opfunc == _paravirt_ident_64)
27007+ else if (opfunc == (void *)_paravirt_ident_64)
27008 ret = paravirt_patch_ident_64(insnbuf, len);
27009+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27010+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
27011+ ret = paravirt_patch_ident_64(insnbuf, len);
27012+#endif
27013
27014 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
27015 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
27016@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
27017 if (insn_len > len || start == NULL)
27018 insn_len = len;
27019 else
27020- memcpy(insnbuf, start, insn_len);
27021+ memcpy(insnbuf, ktla_ktva(start), insn_len);
27022
27023 return insn_len;
27024 }
27025@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
27026 return this_cpu_read(paravirt_lazy_mode);
27027 }
27028
27029-struct pv_info pv_info = {
27030+struct pv_info pv_info __read_only = {
27031 .name = "bare hardware",
27032 .paravirt_enabled = 0,
27033 .kernel_rpl = 0,
27034@@ -311,16 +318,16 @@ struct pv_info pv_info = {
27035 #endif
27036 };
27037
27038-struct pv_init_ops pv_init_ops = {
27039+struct pv_init_ops pv_init_ops __read_only = {
27040 .patch = native_patch,
27041 };
27042
27043-struct pv_time_ops pv_time_ops = {
27044+struct pv_time_ops pv_time_ops __read_only = {
27045 .sched_clock = native_sched_clock,
27046 .steal_clock = native_steal_clock,
27047 };
27048
27049-__visible struct pv_irq_ops pv_irq_ops = {
27050+__visible struct pv_irq_ops pv_irq_ops __read_only = {
27051 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
27052 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
27053 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
27054@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
27055 #endif
27056 };
27057
27058-__visible struct pv_cpu_ops pv_cpu_ops = {
27059+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
27060 .cpuid = native_cpuid,
27061 .get_debugreg = native_get_debugreg,
27062 .set_debugreg = native_set_debugreg,
27063@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
27064 NOKPROBE_SYMBOL(native_set_debugreg);
27065 NOKPROBE_SYMBOL(native_load_idt);
27066
27067-struct pv_apic_ops pv_apic_ops = {
27068+struct pv_apic_ops pv_apic_ops __read_only= {
27069 #ifdef CONFIG_X86_LOCAL_APIC
27070 .startup_ipi_hook = paravirt_nop,
27071 #endif
27072 };
27073
27074-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
27075+#ifdef CONFIG_X86_32
27076+#ifdef CONFIG_X86_PAE
27077+/* 64-bit pagetable entries */
27078+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
27079+#else
27080 /* 32-bit pagetable entries */
27081 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
27082+#endif
27083 #else
27084 /* 64-bit pagetable entries */
27085 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
27086 #endif
27087
27088-struct pv_mmu_ops pv_mmu_ops = {
27089+struct pv_mmu_ops pv_mmu_ops __read_only = {
27090
27091 .read_cr2 = native_read_cr2,
27092 .write_cr2 = native_write_cr2,
27093@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
27094 .make_pud = PTE_IDENT,
27095
27096 .set_pgd = native_set_pgd,
27097+ .set_pgd_batched = native_set_pgd_batched,
27098 #endif
27099 #endif /* PAGETABLE_LEVELS >= 3 */
27100
27101@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
27102 },
27103
27104 .set_fixmap = native_set_fixmap,
27105+
27106+#ifdef CONFIG_PAX_KERNEXEC
27107+ .pax_open_kernel = native_pax_open_kernel,
27108+ .pax_close_kernel = native_pax_close_kernel,
27109+#endif
27110+
27111 };
27112
27113 EXPORT_SYMBOL_GPL(pv_time_ops);
27114diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
27115index 0497f71..7186c0d 100644
27116--- a/arch/x86/kernel/pci-calgary_64.c
27117+++ b/arch/x86/kernel/pci-calgary_64.c
27118@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
27119 tce_space = be64_to_cpu(readq(target));
27120 tce_space = tce_space & TAR_SW_BITS;
27121
27122- tce_space = tce_space & (~specified_table_size);
27123+ tce_space = tce_space & (~(unsigned long)specified_table_size);
27124 info->tce_space = (u64 *)__va(tce_space);
27125 }
27126 }
27127diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
27128index 35ccf75..7a15747 100644
27129--- a/arch/x86/kernel/pci-iommu_table.c
27130+++ b/arch/x86/kernel/pci-iommu_table.c
27131@@ -2,7 +2,7 @@
27132 #include <asm/iommu_table.h>
27133 #include <linux/string.h>
27134 #include <linux/kallsyms.h>
27135-
27136+#include <linux/sched.h>
27137
27138 #define DEBUG 1
27139
27140diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
27141index 77dd0ad..9ec4723 100644
27142--- a/arch/x86/kernel/pci-swiotlb.c
27143+++ b/arch/x86/kernel/pci-swiotlb.c
27144@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
27145 struct dma_attrs *attrs)
27146 {
27147 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
27148- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
27149+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
27150 else
27151 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
27152 }
27153diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
27154index ca7f0d5..8996469 100644
27155--- a/arch/x86/kernel/preempt.S
27156+++ b/arch/x86/kernel/preempt.S
27157@@ -3,12 +3,14 @@
27158 #include <asm/dwarf2.h>
27159 #include <asm/asm.h>
27160 #include <asm/calling.h>
27161+#include <asm/alternative-asm.h>
27162
27163 ENTRY(___preempt_schedule)
27164 CFI_STARTPROC
27165 SAVE_ALL
27166 call preempt_schedule
27167 RESTORE_ALL
27168+ pax_force_retaddr
27169 ret
27170 CFI_ENDPROC
27171
27172@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
27173 SAVE_ALL
27174 call preempt_schedule_context
27175 RESTORE_ALL
27176+ pax_force_retaddr
27177 ret
27178 CFI_ENDPROC
27179
27180diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
27181index f804dc9..7c62095 100644
27182--- a/arch/x86/kernel/process.c
27183+++ b/arch/x86/kernel/process.c
27184@@ -36,7 +36,8 @@
27185 * section. Since TSS's are completely CPU-local, we want them
27186 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
27187 */
27188-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
27189+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
27190+EXPORT_SYMBOL(init_tss);
27191
27192 #ifdef CONFIG_X86_64
27193 static DEFINE_PER_CPU(unsigned char, is_idle);
27194@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
27195 task_xstate_cachep =
27196 kmem_cache_create("task_xstate", xstate_size,
27197 __alignof__(union thread_xstate),
27198- SLAB_PANIC | SLAB_NOTRACK, NULL);
27199+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
27200 setup_xstate_comp();
27201 }
27202
27203@@ -106,7 +107,7 @@ void exit_thread(void)
27204 unsigned long *bp = t->io_bitmap_ptr;
27205
27206 if (bp) {
27207- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
27208+ struct tss_struct *tss = init_tss + get_cpu();
27209
27210 t->io_bitmap_ptr = NULL;
27211 clear_thread_flag(TIF_IO_BITMAP);
27212@@ -126,6 +127,9 @@ void flush_thread(void)
27213 {
27214 struct task_struct *tsk = current;
27215
27216+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
27217+ loadsegment(gs, 0);
27218+#endif
27219 flush_ptrace_hw_breakpoint(tsk);
27220 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
27221 drop_init_fpu(tsk);
27222@@ -272,7 +276,7 @@ static void __exit_idle(void)
27223 void exit_idle(void)
27224 {
27225 /* idle loop has pid 0 */
27226- if (current->pid)
27227+ if (task_pid_nr(current))
27228 return;
27229 __exit_idle();
27230 }
27231@@ -325,7 +329,7 @@ bool xen_set_default_idle(void)
27232 return ret;
27233 }
27234 #endif
27235-void stop_this_cpu(void *dummy)
27236+__noreturn void stop_this_cpu(void *dummy)
27237 {
27238 local_irq_disable();
27239 /*
27240@@ -454,16 +458,37 @@ static int __init idle_setup(char *str)
27241 }
27242 early_param("idle", idle_setup);
27243
27244-unsigned long arch_align_stack(unsigned long sp)
27245+#ifdef CONFIG_PAX_RANDKSTACK
27246+void pax_randomize_kstack(struct pt_regs *regs)
27247 {
27248- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
27249- sp -= get_random_int() % 8192;
27250- return sp & ~0xf;
27251-}
27252+ struct thread_struct *thread = &current->thread;
27253+ unsigned long time;
27254
27255-unsigned long arch_randomize_brk(struct mm_struct *mm)
27256-{
27257- unsigned long range_end = mm->brk + 0x02000000;
27258- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
27259-}
27260+ if (!randomize_va_space)
27261+ return;
27262+
27263+ if (v8086_mode(regs))
27264+ return;
27265
27266+ rdtscl(time);
27267+
27268+ /* P4 seems to return a 0 LSB, ignore it */
27269+#ifdef CONFIG_MPENTIUM4
27270+ time &= 0x3EUL;
27271+ time <<= 2;
27272+#elif defined(CONFIG_X86_64)
27273+ time &= 0xFUL;
27274+ time <<= 4;
27275+#else
27276+ time &= 0x1FUL;
27277+ time <<= 3;
27278+#endif
27279+
27280+ thread->sp0 ^= time;
27281+ load_sp0(init_tss + smp_processor_id(), thread);
27282+
27283+#ifdef CONFIG_X86_64
27284+ this_cpu_write(kernel_stack, thread->sp0);
27285+#endif
27286+}
27287+#endif
27288diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
27289index 7bc86bb..0ea06e8 100644
27290--- a/arch/x86/kernel/process_32.c
27291+++ b/arch/x86/kernel/process_32.c
27292@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
27293 unsigned long thread_saved_pc(struct task_struct *tsk)
27294 {
27295 return ((unsigned long *)tsk->thread.sp)[3];
27296+//XXX return tsk->thread.eip;
27297 }
27298
27299 void __show_regs(struct pt_regs *regs, int all)
27300@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
27301 unsigned long sp;
27302 unsigned short ss, gs;
27303
27304- if (user_mode_vm(regs)) {
27305+ if (user_mode(regs)) {
27306 sp = regs->sp;
27307 ss = regs->ss & 0xffff;
27308- gs = get_user_gs(regs);
27309 } else {
27310 sp = kernel_stack_pointer(regs);
27311 savesegment(ss, ss);
27312- savesegment(gs, gs);
27313 }
27314+ gs = get_user_gs(regs);
27315
27316 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
27317 (u16)regs->cs, regs->ip, regs->flags,
27318- smp_processor_id());
27319+ raw_smp_processor_id());
27320 print_symbol("EIP is at %s\n", regs->ip);
27321
27322 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
27323@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
27324 int copy_thread(unsigned long clone_flags, unsigned long sp,
27325 unsigned long arg, struct task_struct *p)
27326 {
27327- struct pt_regs *childregs = task_pt_regs(p);
27328+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
27329 struct task_struct *tsk;
27330 int err;
27331
27332 p->thread.sp = (unsigned long) childregs;
27333 p->thread.sp0 = (unsigned long) (childregs+1);
27334+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27335
27336 if (unlikely(p->flags & PF_KTHREAD)) {
27337 /* kernel thread */
27338 memset(childregs, 0, sizeof(struct pt_regs));
27339 p->thread.ip = (unsigned long) ret_from_kernel_thread;
27340- task_user_gs(p) = __KERNEL_STACK_CANARY;
27341- childregs->ds = __USER_DS;
27342- childregs->es = __USER_DS;
27343+ savesegment(gs, childregs->gs);
27344+ childregs->ds = __KERNEL_DS;
27345+ childregs->es = __KERNEL_DS;
27346 childregs->fs = __KERNEL_PERCPU;
27347 childregs->bx = sp; /* function */
27348 childregs->bp = arg;
27349@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27350 struct thread_struct *prev = &prev_p->thread,
27351 *next = &next_p->thread;
27352 int cpu = smp_processor_id();
27353- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27354+ struct tss_struct *tss = init_tss + cpu;
27355 fpu_switch_t fpu;
27356
27357 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
27358@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27359 */
27360 lazy_save_gs(prev->gs);
27361
27362+#ifdef CONFIG_PAX_MEMORY_UDEREF
27363+ __set_fs(task_thread_info(next_p)->addr_limit);
27364+#endif
27365+
27366 /*
27367 * Load the per-thread Thread-Local Storage descriptor.
27368 */
27369@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27370 */
27371 arch_end_context_switch(next_p);
27372
27373- this_cpu_write(kernel_stack,
27374- (unsigned long)task_stack_page(next_p) +
27375- THREAD_SIZE - KERNEL_STACK_OFFSET);
27376+ this_cpu_write(current_task, next_p);
27377+ this_cpu_write(current_tinfo, &next_p->tinfo);
27378+ this_cpu_write(kernel_stack, next->sp0);
27379
27380 /*
27381 * Restore %gs if needed (which is common)
27382@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27383
27384 switch_fpu_finish(next_p, fpu);
27385
27386- this_cpu_write(current_task, next_p);
27387-
27388 return prev_p;
27389 }
27390
27391@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
27392 } while (count++ < 16);
27393 return 0;
27394 }
27395-
27396diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
27397index ca5b02d..c0b2f6a 100644
27398--- a/arch/x86/kernel/process_64.c
27399+++ b/arch/x86/kernel/process_64.c
27400@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27401 struct pt_regs *childregs;
27402 struct task_struct *me = current;
27403
27404- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
27405+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
27406 childregs = task_pt_regs(p);
27407 p->thread.sp = (unsigned long) childregs;
27408 p->thread.usersp = me->thread.usersp;
27409+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27410 set_tsk_thread_flag(p, TIF_FORK);
27411 p->thread.fpu_counter = 0;
27412 p->thread.io_bitmap_ptr = NULL;
27413@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27414 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
27415 savesegment(es, p->thread.es);
27416 savesegment(ds, p->thread.ds);
27417+ savesegment(ss, p->thread.ss);
27418+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
27419 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27420
27421 if (unlikely(p->flags & PF_KTHREAD)) {
27422@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27423 struct thread_struct *prev = &prev_p->thread;
27424 struct thread_struct *next = &next_p->thread;
27425 int cpu = smp_processor_id();
27426- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27427+ struct tss_struct *tss = init_tss + cpu;
27428 unsigned fsindex, gsindex;
27429 fpu_switch_t fpu;
27430
27431@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27432 if (unlikely(next->ds | prev->ds))
27433 loadsegment(ds, next->ds);
27434
27435+ savesegment(ss, prev->ss);
27436+ if (unlikely(next->ss != prev->ss))
27437+ loadsegment(ss, next->ss);
27438
27439 /* We must save %fs and %gs before load_TLS() because
27440 * %fs and %gs may be cleared by load_TLS().
27441@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27442 prev->usersp = this_cpu_read(old_rsp);
27443 this_cpu_write(old_rsp, next->usersp);
27444 this_cpu_write(current_task, next_p);
27445+ this_cpu_write(current_tinfo, &next_p->tinfo);
27446
27447 /*
27448 * If it were not for PREEMPT_ACTIVE we could guarantee that the
27449@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27450 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
27451 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
27452
27453- this_cpu_write(kernel_stack,
27454- (unsigned long)task_stack_page(next_p) +
27455- THREAD_SIZE - KERNEL_STACK_OFFSET);
27456+ this_cpu_write(kernel_stack, next->sp0);
27457
27458 /*
27459 * Now maybe reload the debug registers and handle I/O bitmaps
27460@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
27461 if (!p || p == current || p->state == TASK_RUNNING)
27462 return 0;
27463 stack = (unsigned long)task_stack_page(p);
27464- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
27465+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
27466 return 0;
27467 fp = *(u64 *)(p->thread.sp);
27468 do {
27469- if (fp < (unsigned long)stack ||
27470- fp >= (unsigned long)stack+THREAD_SIZE)
27471+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
27472 return 0;
27473 ip = *(u64 *)(fp+8);
27474 if (!in_sched_functions(ip))
27475diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
27476index 678c0ad..2fc2a7b 100644
27477--- a/arch/x86/kernel/ptrace.c
27478+++ b/arch/x86/kernel/ptrace.c
27479@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
27480 unsigned long sp = (unsigned long)&regs->sp;
27481 u32 *prev_esp;
27482
27483- if (context == (sp & ~(THREAD_SIZE - 1)))
27484+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
27485 return sp;
27486
27487- prev_esp = (u32 *)(context);
27488+ prev_esp = *(u32 **)(context);
27489 if (prev_esp)
27490 return (unsigned long)prev_esp;
27491
27492@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
27493 if (child->thread.gs != value)
27494 return do_arch_prctl(child, ARCH_SET_GS, value);
27495 return 0;
27496+
27497+ case offsetof(struct user_regs_struct,ip):
27498+ /*
27499+ * Protect against any attempt to set ip to an
27500+ * impossible address. There are dragons lurking if the
27501+ * address is noncanonical. (This explicitly allows
27502+ * setting ip to TASK_SIZE_MAX, because user code can do
27503+ * that all by itself by running off the end of its
27504+ * address space.
27505+ */
27506+ if (value > TASK_SIZE_MAX)
27507+ return -EIO;
27508+ break;
27509+
27510 #endif
27511 }
27512
27513@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
27514 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27515 {
27516 int i;
27517- int dr7 = 0;
27518+ unsigned long dr7 = 0;
27519 struct arch_hw_breakpoint *info;
27520
27521 for (i = 0; i < HBP_NUM; i++) {
27522@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
27523 unsigned long addr, unsigned long data)
27524 {
27525 int ret;
27526- unsigned long __user *datap = (unsigned long __user *)data;
27527+ unsigned long __user *datap = (__force unsigned long __user *)data;
27528
27529 switch (request) {
27530 /* read the word at location addr in the USER area. */
27531@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27532 if ((int) addr < 0)
27533 return -EIO;
27534 ret = do_get_thread_area(child, addr,
27535- (struct user_desc __user *)data);
27536+ (__force struct user_desc __user *) data);
27537 break;
27538
27539 case PTRACE_SET_THREAD_AREA:
27540 if ((int) addr < 0)
27541 return -EIO;
27542 ret = do_set_thread_area(child, addr,
27543- (struct user_desc __user *)data, 0);
27544+ (__force struct user_desc __user *) data, 0);
27545 break;
27546 #endif
27547
27548@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27549
27550 #ifdef CONFIG_X86_64
27551
27552-static struct user_regset x86_64_regsets[] __read_mostly = {
27553+static user_regset_no_const x86_64_regsets[] __read_only = {
27554 [REGSET_GENERAL] = {
27555 .core_note_type = NT_PRSTATUS,
27556 .n = sizeof(struct user_regs_struct) / sizeof(long),
27557@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27558 #endif /* CONFIG_X86_64 */
27559
27560 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27561-static struct user_regset x86_32_regsets[] __read_mostly = {
27562+static user_regset_no_const x86_32_regsets[] __read_only = {
27563 [REGSET_GENERAL] = {
27564 .core_note_type = NT_PRSTATUS,
27565 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27566@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27567 */
27568 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27569
27570-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27571+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27572 {
27573 #ifdef CONFIG_X86_64
27574 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27575@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27576 memset(info, 0, sizeof(*info));
27577 info->si_signo = SIGTRAP;
27578 info->si_code = si_code;
27579- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27580+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27581 }
27582
27583 void user_single_step_siginfo(struct task_struct *tsk,
27584@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27585 # define IS_IA32 0
27586 #endif
27587
27588+#ifdef CONFIG_GRKERNSEC_SETXID
27589+extern void gr_delayed_cred_worker(void);
27590+#endif
27591+
27592 /*
27593 * We must return the syscall number to actually look up in the table.
27594 * This can be -1L to skip running any syscall at all.
27595@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27596
27597 user_exit();
27598
27599+#ifdef CONFIG_GRKERNSEC_SETXID
27600+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27601+ gr_delayed_cred_worker();
27602+#endif
27603+
27604 /*
27605 * If we stepped into a sysenter/syscall insn, it trapped in
27606 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27607@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27608 */
27609 user_exit();
27610
27611+#ifdef CONFIG_GRKERNSEC_SETXID
27612+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27613+ gr_delayed_cred_worker();
27614+#endif
27615+
27616 audit_syscall_exit(regs);
27617
27618 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27619diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27620index 2f355d2..e75ed0a 100644
27621--- a/arch/x86/kernel/pvclock.c
27622+++ b/arch/x86/kernel/pvclock.c
27623@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27624 reset_hung_task_detector();
27625 }
27626
27627-static atomic64_t last_value = ATOMIC64_INIT(0);
27628+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27629
27630 void pvclock_resume(void)
27631 {
27632- atomic64_set(&last_value, 0);
27633+ atomic64_set_unchecked(&last_value, 0);
27634 }
27635
27636 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27637@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27638 * updating at the same time, and one of them could be slightly behind,
27639 * making the assumption that last_value always go forward fail to hold.
27640 */
27641- last = atomic64_read(&last_value);
27642+ last = atomic64_read_unchecked(&last_value);
27643 do {
27644 if (ret < last)
27645 return last;
27646- last = atomic64_cmpxchg(&last_value, last, ret);
27647+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27648 } while (unlikely(last != ret));
27649
27650 return ret;
27651diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27652index 17962e6..47f55db 100644
27653--- a/arch/x86/kernel/reboot.c
27654+++ b/arch/x86/kernel/reboot.c
27655@@ -69,6 +69,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27656
27657 void __noreturn machine_real_restart(unsigned int type)
27658 {
27659+
27660+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27661+ struct desc_struct *gdt;
27662+#endif
27663+
27664 local_irq_disable();
27665
27666 /*
27667@@ -96,7 +101,29 @@ void __noreturn machine_real_restart(unsigned int type)
27668
27669 /* Jump to the identity-mapped low memory code */
27670 #ifdef CONFIG_X86_32
27671- asm volatile("jmpl *%0" : :
27672+
27673+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27674+ gdt = get_cpu_gdt_table(smp_processor_id());
27675+ pax_open_kernel();
27676+#ifdef CONFIG_PAX_MEMORY_UDEREF
27677+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27678+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27679+ loadsegment(ds, __KERNEL_DS);
27680+ loadsegment(es, __KERNEL_DS);
27681+ loadsegment(ss, __KERNEL_DS);
27682+#endif
27683+#ifdef CONFIG_PAX_KERNEXEC
27684+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27685+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27686+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27687+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27688+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27689+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27690+#endif
27691+ pax_close_kernel();
27692+#endif
27693+
27694+ asm volatile("ljmpl *%0" : :
27695 "rm" (real_mode_header->machine_real_restart_asm),
27696 "a" (type));
27697 #else
27698@@ -500,7 +527,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27699 * This means that this function can never return, it can misbehave
27700 * by not rebooting properly and hanging.
27701 */
27702-static void native_machine_emergency_restart(void)
27703+static void __noreturn native_machine_emergency_restart(void)
27704 {
27705 int i;
27706 int attempt = 0;
27707@@ -620,13 +647,13 @@ void native_machine_shutdown(void)
27708 #endif
27709 }
27710
27711-static void __machine_emergency_restart(int emergency)
27712+static void __noreturn __machine_emergency_restart(int emergency)
27713 {
27714 reboot_emergency = emergency;
27715 machine_ops.emergency_restart();
27716 }
27717
27718-static void native_machine_restart(char *__unused)
27719+static void __noreturn native_machine_restart(char *__unused)
27720 {
27721 pr_notice("machine restart\n");
27722
27723@@ -635,7 +662,7 @@ static void native_machine_restart(char *__unused)
27724 __machine_emergency_restart(0);
27725 }
27726
27727-static void native_machine_halt(void)
27728+static void __noreturn native_machine_halt(void)
27729 {
27730 /* Stop other cpus and apics */
27731 machine_shutdown();
27732@@ -645,7 +672,7 @@ static void native_machine_halt(void)
27733 stop_this_cpu(NULL);
27734 }
27735
27736-static void native_machine_power_off(void)
27737+static void __noreturn native_machine_power_off(void)
27738 {
27739 if (pm_power_off) {
27740 if (!reboot_force)
27741@@ -654,9 +681,10 @@ static void native_machine_power_off(void)
27742 }
27743 /* A fallback in case there is no PM info available */
27744 tboot_shutdown(TB_SHUTDOWN_HALT);
27745+ unreachable();
27746 }
27747
27748-struct machine_ops machine_ops = {
27749+struct machine_ops machine_ops __read_only = {
27750 .power_off = native_machine_power_off,
27751 .shutdown = native_machine_shutdown,
27752 .emergency_restart = native_machine_emergency_restart,
27753diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27754index c8e41e9..64049ef 100644
27755--- a/arch/x86/kernel/reboot_fixups_32.c
27756+++ b/arch/x86/kernel/reboot_fixups_32.c
27757@@ -57,7 +57,7 @@ struct device_fixup {
27758 unsigned int vendor;
27759 unsigned int device;
27760 void (*reboot_fixup)(struct pci_dev *);
27761-};
27762+} __do_const;
27763
27764 /*
27765 * PCI ids solely used for fixups_table go here
27766diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27767index 3fd2c69..a444264 100644
27768--- a/arch/x86/kernel/relocate_kernel_64.S
27769+++ b/arch/x86/kernel/relocate_kernel_64.S
27770@@ -96,8 +96,7 @@ relocate_kernel:
27771
27772 /* jump to identity mapped page */
27773 addq $(identity_mapped - relocate_kernel), %r8
27774- pushq %r8
27775- ret
27776+ jmp *%r8
27777
27778 identity_mapped:
27779 /* set return address to 0 if not preserving context */
27780diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27781index 41ead8d..7ccde23 100644
27782--- a/arch/x86/kernel/setup.c
27783+++ b/arch/x86/kernel/setup.c
27784@@ -110,6 +110,7 @@
27785 #include <asm/mce.h>
27786 #include <asm/alternative.h>
27787 #include <asm/prom.h>
27788+#include <asm/boot.h>
27789
27790 /*
27791 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27792@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27793 #endif
27794
27795
27796-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27797-__visible unsigned long mmu_cr4_features;
27798+#ifdef CONFIG_X86_64
27799+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27800+#elif defined(CONFIG_X86_PAE)
27801+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27802 #else
27803-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27804+__visible unsigned long mmu_cr4_features __read_only;
27805 #endif
27806
27807+void set_in_cr4(unsigned long mask)
27808+{
27809+ unsigned long cr4 = read_cr4();
27810+
27811+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27812+ return;
27813+
27814+ pax_open_kernel();
27815+ mmu_cr4_features |= mask;
27816+ pax_close_kernel();
27817+
27818+ if (trampoline_cr4_features)
27819+ *trampoline_cr4_features = mmu_cr4_features;
27820+ cr4 |= mask;
27821+ write_cr4(cr4);
27822+}
27823+EXPORT_SYMBOL(set_in_cr4);
27824+
27825+void clear_in_cr4(unsigned long mask)
27826+{
27827+ unsigned long cr4 = read_cr4();
27828+
27829+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27830+ return;
27831+
27832+ pax_open_kernel();
27833+ mmu_cr4_features &= ~mask;
27834+ pax_close_kernel();
27835+
27836+ if (trampoline_cr4_features)
27837+ *trampoline_cr4_features = mmu_cr4_features;
27838+ cr4 &= ~mask;
27839+ write_cr4(cr4);
27840+}
27841+EXPORT_SYMBOL(clear_in_cr4);
27842+
27843 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27844 int bootloader_type, bootloader_version;
27845
27846@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27847 * area (640->1Mb) as ram even though it is not.
27848 * take them out.
27849 */
27850- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27851+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27852
27853 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27854 }
27855@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27856 /* called before trim_bios_range() to spare extra sanitize */
27857 static void __init e820_add_kernel_range(void)
27858 {
27859- u64 start = __pa_symbol(_text);
27860+ u64 start = __pa_symbol(ktla_ktva(_text));
27861 u64 size = __pa_symbol(_end) - start;
27862
27863 /*
27864@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27865
27866 void __init setup_arch(char **cmdline_p)
27867 {
27868+#ifdef CONFIG_X86_32
27869+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27870+#else
27871 memblock_reserve(__pa_symbol(_text),
27872 (unsigned long)__bss_stop - (unsigned long)_text);
27873+#endif
27874
27875 early_reserve_initrd();
27876
27877@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27878
27879 if (!boot_params.hdr.root_flags)
27880 root_mountflags &= ~MS_RDONLY;
27881- init_mm.start_code = (unsigned long) _text;
27882- init_mm.end_code = (unsigned long) _etext;
27883+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27884+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27885 init_mm.end_data = (unsigned long) _edata;
27886 init_mm.brk = _brk_end;
27887
27888- code_resource.start = __pa_symbol(_text);
27889- code_resource.end = __pa_symbol(_etext)-1;
27890- data_resource.start = __pa_symbol(_etext);
27891+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27892+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27893+ data_resource.start = __pa_symbol(_sdata);
27894 data_resource.end = __pa_symbol(_edata)-1;
27895 bss_resource.start = __pa_symbol(__bss_start);
27896 bss_resource.end = __pa_symbol(__bss_stop)-1;
27897diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27898index 5cdff03..80fa283 100644
27899--- a/arch/x86/kernel/setup_percpu.c
27900+++ b/arch/x86/kernel/setup_percpu.c
27901@@ -21,19 +21,17 @@
27902 #include <asm/cpu.h>
27903 #include <asm/stackprotector.h>
27904
27905-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27906+#ifdef CONFIG_SMP
27907+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27908 EXPORT_PER_CPU_SYMBOL(cpu_number);
27909+#endif
27910
27911-#ifdef CONFIG_X86_64
27912 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27913-#else
27914-#define BOOT_PERCPU_OFFSET 0
27915-#endif
27916
27917 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27918 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27919
27920-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27921+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27922 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27923 };
27924 EXPORT_SYMBOL(__per_cpu_offset);
27925@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27926 {
27927 #ifdef CONFIG_NEED_MULTIPLE_NODES
27928 pg_data_t *last = NULL;
27929- unsigned int cpu;
27930+ int cpu;
27931
27932 for_each_possible_cpu(cpu) {
27933 int node = early_cpu_to_node(cpu);
27934@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27935 {
27936 #ifdef CONFIG_X86_32
27937 struct desc_struct gdt;
27938+ unsigned long base = per_cpu_offset(cpu);
27939
27940- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27941- 0x2 | DESCTYPE_S, 0x8);
27942- gdt.s = 1;
27943+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27944+ 0x83 | DESCTYPE_S, 0xC);
27945 write_gdt_entry(get_cpu_gdt_table(cpu),
27946 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27947 #endif
27948@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27949 /* alrighty, percpu areas up and running */
27950 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27951 for_each_possible_cpu(cpu) {
27952+#ifdef CONFIG_CC_STACKPROTECTOR
27953+#ifdef CONFIG_X86_32
27954+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27955+#endif
27956+#endif
27957 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27958 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27959 per_cpu(cpu_number, cpu) = cpu;
27960@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27961 */
27962 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27963 #endif
27964+#ifdef CONFIG_CC_STACKPROTECTOR
27965+#ifdef CONFIG_X86_32
27966+ if (!cpu)
27967+ per_cpu(stack_canary.canary, cpu) = canary;
27968+#endif
27969+#endif
27970 /*
27971 * Up to this point, the boot CPU has been using .init.data
27972 * area. Reload any changed state for the boot CPU.
27973diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27974index 2851d63..83bf567 100644
27975--- a/arch/x86/kernel/signal.c
27976+++ b/arch/x86/kernel/signal.c
27977@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27978 * Align the stack pointer according to the i386 ABI,
27979 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27980 */
27981- sp = ((sp + 4) & -16ul) - 4;
27982+ sp = ((sp - 12) & -16ul) - 4;
27983 #else /* !CONFIG_X86_32 */
27984 sp = round_down(sp, 16) - 8;
27985 #endif
27986@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27987 }
27988
27989 if (current->mm->context.vdso)
27990- restorer = current->mm->context.vdso +
27991- selected_vdso32->sym___kernel_sigreturn;
27992+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27993 else
27994- restorer = &frame->retcode;
27995+ restorer = (void __user *)&frame->retcode;
27996 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27997 restorer = ksig->ka.sa.sa_restorer;
27998
27999@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
28000 * reasons and because gdb uses it as a signature to notice
28001 * signal handler stack frames.
28002 */
28003- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
28004+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
28005
28006 if (err)
28007 return -EFAULT;
28008@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
28009 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
28010
28011 /* Set up to return from userspace. */
28012- restorer = current->mm->context.vdso +
28013- selected_vdso32->sym___kernel_rt_sigreturn;
28014+ if (current->mm->context.vdso)
28015+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
28016+ else
28017+ restorer = (void __user *)&frame->retcode;
28018 if (ksig->ka.sa.sa_flags & SA_RESTORER)
28019 restorer = ksig->ka.sa.sa_restorer;
28020 put_user_ex(restorer, &frame->pretcode);
28021@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
28022 * reasons and because gdb uses it as a signature to notice
28023 * signal handler stack frames.
28024 */
28025- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
28026+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
28027 } put_user_catch(err);
28028
28029 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
28030@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28031 {
28032 int usig = signr_convert(ksig->sig);
28033 sigset_t *set = sigmask_to_save();
28034- compat_sigset_t *cset = (compat_sigset_t *) set;
28035+ sigset_t sigcopy;
28036+ compat_sigset_t *cset;
28037+
28038+ sigcopy = *set;
28039+
28040+ cset = (compat_sigset_t *) &sigcopy;
28041
28042 /* Set up the stack frame */
28043 if (is_ia32_frame()) {
28044@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28045 } else if (is_x32_frame()) {
28046 return x32_setup_rt_frame(ksig, cset, regs);
28047 } else {
28048- return __setup_rt_frame(ksig->sig, ksig, set, regs);
28049+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
28050 }
28051 }
28052
28053diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
28054index be8e1bd..a3d93fa 100644
28055--- a/arch/x86/kernel/smp.c
28056+++ b/arch/x86/kernel/smp.c
28057@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
28058
28059 __setup("nonmi_ipi", nonmi_ipi_setup);
28060
28061-struct smp_ops smp_ops = {
28062+struct smp_ops smp_ops __read_only = {
28063 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
28064 .smp_prepare_cpus = native_smp_prepare_cpus,
28065 .smp_cpus_done = native_smp_cpus_done,
28066diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
28067index 42a2dca..35a07aa 100644
28068--- a/arch/x86/kernel/smpboot.c
28069+++ b/arch/x86/kernel/smpboot.c
28070@@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
28071
28072 enable_start_cpu0 = 0;
28073
28074-#ifdef CONFIG_X86_32
28075+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
28076+ barrier();
28077+
28078 /* switch away from the initial page table */
28079+#ifdef CONFIG_PAX_PER_CPU_PGD
28080+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
28081+#else
28082 load_cr3(swapper_pg_dir);
28083+#endif
28084 __flush_tlb_all();
28085-#endif
28086
28087- /* otherwise gcc will move up smp_processor_id before the cpu_init */
28088- barrier();
28089 /*
28090 * Check TSC synchronization with the BP:
28091 */
28092@@ -760,8 +763,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28093 alternatives_enable_smp();
28094
28095 idle->thread.sp = (unsigned long) (((struct pt_regs *)
28096- (THREAD_SIZE + task_stack_page(idle))) - 1);
28097+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
28098 per_cpu(current_task, cpu) = idle;
28099+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28100
28101 #ifdef CONFIG_X86_32
28102 /* Stack for startup_32 can be just as for start_secondary onwards */
28103@@ -770,10 +774,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28104 clear_tsk_thread_flag(idle, TIF_FORK);
28105 initial_gs = per_cpu_offset(cpu);
28106 #endif
28107- per_cpu(kernel_stack, cpu) =
28108- (unsigned long)task_stack_page(idle) -
28109- KERNEL_STACK_OFFSET + THREAD_SIZE;
28110+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28111+ pax_open_kernel();
28112 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
28113+ pax_close_kernel();
28114 initial_code = (unsigned long)start_secondary;
28115 stack_start = idle->thread.sp;
28116
28117@@ -919,6 +923,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
28118 /* the FPU context is blank, nobody can own it */
28119 __cpu_disable_lazy_restore(cpu);
28120
28121+#ifdef CONFIG_PAX_PER_CPU_PGD
28122+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
28123+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28124+ KERNEL_PGD_PTRS);
28125+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
28126+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28127+ KERNEL_PGD_PTRS);
28128+#endif
28129+
28130 err = do_boot_cpu(apicid, cpu, tidle);
28131 if (err) {
28132 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
28133diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
28134index 9b4d51d..5d28b58 100644
28135--- a/arch/x86/kernel/step.c
28136+++ b/arch/x86/kernel/step.c
28137@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28138 struct desc_struct *desc;
28139 unsigned long base;
28140
28141- seg &= ~7UL;
28142+ seg >>= 3;
28143
28144 mutex_lock(&child->mm->context.lock);
28145- if (unlikely((seg >> 3) >= child->mm->context.size))
28146+ if (unlikely(seg >= child->mm->context.size))
28147 addr = -1L; /* bogus selector, access would fault */
28148 else {
28149 desc = child->mm->context.ldt + seg;
28150@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28151 addr += base;
28152 }
28153 mutex_unlock(&child->mm->context.lock);
28154- }
28155+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
28156+ addr = ktla_ktva(addr);
28157
28158 return addr;
28159 }
28160@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
28161 unsigned char opcode[15];
28162 unsigned long addr = convert_ip_to_linear(child, regs);
28163
28164+ if (addr == -EINVAL)
28165+ return 0;
28166+
28167 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
28168 for (i = 0; i < copied; i++) {
28169 switch (opcode[i]) {
28170diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
28171new file mode 100644
28172index 0000000..5877189
28173--- /dev/null
28174+++ b/arch/x86/kernel/sys_i386_32.c
28175@@ -0,0 +1,189 @@
28176+/*
28177+ * This file contains various random system calls that
28178+ * have a non-standard calling sequence on the Linux/i386
28179+ * platform.
28180+ */
28181+
28182+#include <linux/errno.h>
28183+#include <linux/sched.h>
28184+#include <linux/mm.h>
28185+#include <linux/fs.h>
28186+#include <linux/smp.h>
28187+#include <linux/sem.h>
28188+#include <linux/msg.h>
28189+#include <linux/shm.h>
28190+#include <linux/stat.h>
28191+#include <linux/syscalls.h>
28192+#include <linux/mman.h>
28193+#include <linux/file.h>
28194+#include <linux/utsname.h>
28195+#include <linux/ipc.h>
28196+#include <linux/elf.h>
28197+
28198+#include <linux/uaccess.h>
28199+#include <linux/unistd.h>
28200+
28201+#include <asm/syscalls.h>
28202+
28203+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
28204+{
28205+ unsigned long pax_task_size = TASK_SIZE;
28206+
28207+#ifdef CONFIG_PAX_SEGMEXEC
28208+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
28209+ pax_task_size = SEGMEXEC_TASK_SIZE;
28210+#endif
28211+
28212+ if (flags & MAP_FIXED)
28213+ if (len > pax_task_size || addr > pax_task_size - len)
28214+ return -EINVAL;
28215+
28216+ return 0;
28217+}
28218+
28219+/*
28220+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
28221+ */
28222+static unsigned long get_align_mask(void)
28223+{
28224+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
28225+ return 0;
28226+
28227+ if (!(current->flags & PF_RANDOMIZE))
28228+ return 0;
28229+
28230+ return va_align.mask;
28231+}
28232+
28233+unsigned long
28234+arch_get_unmapped_area(struct file *filp, unsigned long addr,
28235+ unsigned long len, unsigned long pgoff, unsigned long flags)
28236+{
28237+ struct mm_struct *mm = current->mm;
28238+ struct vm_area_struct *vma;
28239+ unsigned long pax_task_size = TASK_SIZE;
28240+ struct vm_unmapped_area_info info;
28241+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28242+
28243+#ifdef CONFIG_PAX_SEGMEXEC
28244+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28245+ pax_task_size = SEGMEXEC_TASK_SIZE;
28246+#endif
28247+
28248+ pax_task_size -= PAGE_SIZE;
28249+
28250+ if (len > pax_task_size)
28251+ return -ENOMEM;
28252+
28253+ if (flags & MAP_FIXED)
28254+ return addr;
28255+
28256+#ifdef CONFIG_PAX_RANDMMAP
28257+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28258+#endif
28259+
28260+ if (addr) {
28261+ addr = PAGE_ALIGN(addr);
28262+ if (pax_task_size - len >= addr) {
28263+ vma = find_vma(mm, addr);
28264+ if (check_heap_stack_gap(vma, addr, len, offset))
28265+ return addr;
28266+ }
28267+ }
28268+
28269+ info.flags = 0;
28270+ info.length = len;
28271+ info.align_mask = filp ? get_align_mask() : 0;
28272+ info.align_offset = pgoff << PAGE_SHIFT;
28273+ info.threadstack_offset = offset;
28274+
28275+#ifdef CONFIG_PAX_PAGEEXEC
28276+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
28277+ info.low_limit = 0x00110000UL;
28278+ info.high_limit = mm->start_code;
28279+
28280+#ifdef CONFIG_PAX_RANDMMAP
28281+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28282+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
28283+#endif
28284+
28285+ if (info.low_limit < info.high_limit) {
28286+ addr = vm_unmapped_area(&info);
28287+ if (!IS_ERR_VALUE(addr))
28288+ return addr;
28289+ }
28290+ } else
28291+#endif
28292+
28293+ info.low_limit = mm->mmap_base;
28294+ info.high_limit = pax_task_size;
28295+
28296+ return vm_unmapped_area(&info);
28297+}
28298+
28299+unsigned long
28300+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28301+ const unsigned long len, const unsigned long pgoff,
28302+ const unsigned long flags)
28303+{
28304+ struct vm_area_struct *vma;
28305+ struct mm_struct *mm = current->mm;
28306+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
28307+ struct vm_unmapped_area_info info;
28308+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28309+
28310+#ifdef CONFIG_PAX_SEGMEXEC
28311+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28312+ pax_task_size = SEGMEXEC_TASK_SIZE;
28313+#endif
28314+
28315+ pax_task_size -= PAGE_SIZE;
28316+
28317+ /* requested length too big for entire address space */
28318+ if (len > pax_task_size)
28319+ return -ENOMEM;
28320+
28321+ if (flags & MAP_FIXED)
28322+ return addr;
28323+
28324+#ifdef CONFIG_PAX_PAGEEXEC
28325+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
28326+ goto bottomup;
28327+#endif
28328+
28329+#ifdef CONFIG_PAX_RANDMMAP
28330+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28331+#endif
28332+
28333+ /* requesting a specific address */
28334+ if (addr) {
28335+ addr = PAGE_ALIGN(addr);
28336+ if (pax_task_size - len >= addr) {
28337+ vma = find_vma(mm, addr);
28338+ if (check_heap_stack_gap(vma, addr, len, offset))
28339+ return addr;
28340+ }
28341+ }
28342+
28343+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
28344+ info.length = len;
28345+ info.low_limit = PAGE_SIZE;
28346+ info.high_limit = mm->mmap_base;
28347+ info.align_mask = filp ? get_align_mask() : 0;
28348+ info.align_offset = pgoff << PAGE_SHIFT;
28349+ info.threadstack_offset = offset;
28350+
28351+ addr = vm_unmapped_area(&info);
28352+ if (!(addr & ~PAGE_MASK))
28353+ return addr;
28354+ VM_BUG_ON(addr != -ENOMEM);
28355+
28356+bottomup:
28357+ /*
28358+ * A failed mmap() very likely causes application failure,
28359+ * so fall back to the bottom-up function here. This scenario
28360+ * can happen with large stack limits and large mmap()
28361+ * allocations.
28362+ */
28363+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
28364+}
28365diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
28366index 30277e2..5664a29 100644
28367--- a/arch/x86/kernel/sys_x86_64.c
28368+++ b/arch/x86/kernel/sys_x86_64.c
28369@@ -81,8 +81,8 @@ out:
28370 return error;
28371 }
28372
28373-static void find_start_end(unsigned long flags, unsigned long *begin,
28374- unsigned long *end)
28375+static void find_start_end(struct mm_struct *mm, unsigned long flags,
28376+ unsigned long *begin, unsigned long *end)
28377 {
28378 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
28379 unsigned long new_begin;
28380@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
28381 *begin = new_begin;
28382 }
28383 } else {
28384- *begin = current->mm->mmap_legacy_base;
28385+ *begin = mm->mmap_legacy_base;
28386 *end = TASK_SIZE;
28387 }
28388 }
28389@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28390 struct vm_area_struct *vma;
28391 struct vm_unmapped_area_info info;
28392 unsigned long begin, end;
28393+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28394
28395 if (flags & MAP_FIXED)
28396 return addr;
28397
28398- find_start_end(flags, &begin, &end);
28399+ find_start_end(mm, flags, &begin, &end);
28400
28401 if (len > end)
28402 return -ENOMEM;
28403
28404+#ifdef CONFIG_PAX_RANDMMAP
28405+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28406+#endif
28407+
28408 if (addr) {
28409 addr = PAGE_ALIGN(addr);
28410 vma = find_vma(mm, addr);
28411- if (end - len >= addr &&
28412- (!vma || addr + len <= vma->vm_start))
28413+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28414 return addr;
28415 }
28416
28417@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28418 info.high_limit = end;
28419 info.align_mask = filp ? get_align_mask() : 0;
28420 info.align_offset = pgoff << PAGE_SHIFT;
28421+ info.threadstack_offset = offset;
28422 return vm_unmapped_area(&info);
28423 }
28424
28425@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28426 struct mm_struct *mm = current->mm;
28427 unsigned long addr = addr0;
28428 struct vm_unmapped_area_info info;
28429+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28430
28431 /* requested length too big for entire address space */
28432 if (len > TASK_SIZE)
28433@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28434 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
28435 goto bottomup;
28436
28437+#ifdef CONFIG_PAX_RANDMMAP
28438+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28439+#endif
28440+
28441 /* requesting a specific address */
28442 if (addr) {
28443 addr = PAGE_ALIGN(addr);
28444 vma = find_vma(mm, addr);
28445- if (TASK_SIZE - len >= addr &&
28446- (!vma || addr + len <= vma->vm_start))
28447+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28448 return addr;
28449 }
28450
28451@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28452 info.high_limit = mm->mmap_base;
28453 info.align_mask = filp ? get_align_mask() : 0;
28454 info.align_offset = pgoff << PAGE_SHIFT;
28455+ info.threadstack_offset = offset;
28456 addr = vm_unmapped_area(&info);
28457 if (!(addr & ~PAGE_MASK))
28458 return addr;
28459diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
28460index 91a4496..bb87552 100644
28461--- a/arch/x86/kernel/tboot.c
28462+++ b/arch/x86/kernel/tboot.c
28463@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
28464
28465 void tboot_shutdown(u32 shutdown_type)
28466 {
28467- void (*shutdown)(void);
28468+ void (* __noreturn shutdown)(void);
28469
28470 if (!tboot_enabled())
28471 return;
28472@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
28473
28474 switch_to_tboot_pt();
28475
28476- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
28477+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
28478 shutdown();
28479
28480 /* should not reach here */
28481@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
28482 return -ENODEV;
28483 }
28484
28485-static atomic_t ap_wfs_count;
28486+static atomic_unchecked_t ap_wfs_count;
28487
28488 static int tboot_wait_for_aps(int num_aps)
28489 {
28490@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
28491 {
28492 switch (action) {
28493 case CPU_DYING:
28494- atomic_inc(&ap_wfs_count);
28495+ atomic_inc_unchecked(&ap_wfs_count);
28496 if (num_online_cpus() == 1)
28497- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28498+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28499 return NOTIFY_BAD;
28500 break;
28501 }
28502@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
28503
28504 tboot_create_trampoline();
28505
28506- atomic_set(&ap_wfs_count, 0);
28507+ atomic_set_unchecked(&ap_wfs_count, 0);
28508 register_hotcpu_notifier(&tboot_cpu_notifier);
28509
28510 #ifdef CONFIG_DEBUG_FS
28511diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28512index 0fa2960..91eabbe 100644
28513--- a/arch/x86/kernel/time.c
28514+++ b/arch/x86/kernel/time.c
28515@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
28516 {
28517 unsigned long pc = instruction_pointer(regs);
28518
28519- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
28520+ if (!user_mode(regs) && in_lock_functions(pc)) {
28521 #ifdef CONFIG_FRAME_POINTER
28522- return *(unsigned long *)(regs->bp + sizeof(long));
28523+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28524 #else
28525 unsigned long *sp =
28526 (unsigned long *)kernel_stack_pointer(regs);
28527@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28528 * or above a saved flags. Eflags has bits 22-31 zero,
28529 * kernel addresses don't.
28530 */
28531+
28532+#ifdef CONFIG_PAX_KERNEXEC
28533+ return ktla_ktva(sp[0]);
28534+#else
28535 if (sp[0] >> 22)
28536 return sp[0];
28537 if (sp[1] >> 22)
28538 return sp[1];
28539 #endif
28540+
28541+#endif
28542 }
28543 return pc;
28544 }
28545diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28546index f7fec09..9991981 100644
28547--- a/arch/x86/kernel/tls.c
28548+++ b/arch/x86/kernel/tls.c
28549@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28550 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28551 return -EINVAL;
28552
28553+#ifdef CONFIG_PAX_SEGMEXEC
28554+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28555+ return -EINVAL;
28556+#endif
28557+
28558 set_tls_desc(p, idx, &info, 1);
28559
28560 return 0;
28561@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28562
28563 if (kbuf)
28564 info = kbuf;
28565- else if (__copy_from_user(infobuf, ubuf, count))
28566+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28567 return -EFAULT;
28568 else
28569 info = infobuf;
28570diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28571index 1c113db..287b42e 100644
28572--- a/arch/x86/kernel/tracepoint.c
28573+++ b/arch/x86/kernel/tracepoint.c
28574@@ -9,11 +9,11 @@
28575 #include <linux/atomic.h>
28576
28577 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28578-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28579+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28580 (unsigned long) trace_idt_table };
28581
28582 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28583-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28584+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28585
28586 static int trace_irq_vector_refcount;
28587 static DEFINE_MUTEX(irq_vector_mutex);
28588diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28589index 0d0e922..0886373 100644
28590--- a/arch/x86/kernel/traps.c
28591+++ b/arch/x86/kernel/traps.c
28592@@ -67,7 +67,7 @@
28593 #include <asm/proto.h>
28594
28595 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28596-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28597+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28598 #else
28599 #include <asm/processor-flags.h>
28600 #include <asm/setup.h>
28601@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28602 #endif
28603
28604 /* Must be page-aligned because the real IDT is used in a fixmap. */
28605-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28606+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28607
28608 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28609 EXPORT_SYMBOL_GPL(used_vectors);
28610@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28611 }
28612
28613 static nokprobe_inline int
28614-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28615+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28616 struct pt_regs *regs, long error_code)
28617 {
28618 #ifdef CONFIG_X86_32
28619- if (regs->flags & X86_VM_MASK) {
28620+ if (v8086_mode(regs)) {
28621 /*
28622 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28623 * On nmi (interrupt 2), do_trap should not be called.
28624@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28625 return -1;
28626 }
28627 #endif
28628- if (!user_mode(regs)) {
28629+ if (!user_mode_novm(regs)) {
28630 if (!fixup_exception(regs)) {
28631 tsk->thread.error_code = error_code;
28632 tsk->thread.trap_nr = trapnr;
28633+
28634+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28635+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28636+ str = "PAX: suspicious stack segment fault";
28637+#endif
28638+
28639 die(str, regs, error_code);
28640 }
28641+
28642+#ifdef CONFIG_PAX_REFCOUNT
28643+ if (trapnr == X86_TRAP_OF)
28644+ pax_report_refcount_overflow(regs);
28645+#endif
28646+
28647 return 0;
28648 }
28649
28650@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28651 }
28652
28653 static void
28654-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28655+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28656 long error_code, siginfo_t *info)
28657 {
28658 struct task_struct *tsk = current;
28659@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28660 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28661 printk_ratelimit()) {
28662 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28663- tsk->comm, tsk->pid, str,
28664+ tsk->comm, task_pid_nr(tsk), str,
28665 regs->ip, regs->sp, error_code);
28666 print_vma_addr(" in ", regs->ip);
28667 pr_cont("\n");
28668@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28669 tsk->thread.error_code = error_code;
28670 tsk->thread.trap_nr = X86_TRAP_DF;
28671
28672+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28673+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28674+ die("grsec: kernel stack overflow detected", regs, error_code);
28675+#endif
28676+
28677 #ifdef CONFIG_DOUBLEFAULT
28678 df_debug(regs, error_code);
28679 #endif
28680@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28681 conditional_sti(regs);
28682
28683 #ifdef CONFIG_X86_32
28684- if (regs->flags & X86_VM_MASK) {
28685+ if (v8086_mode(regs)) {
28686 local_irq_enable();
28687 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28688 goto exit;
28689@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28690 #endif
28691
28692 tsk = current;
28693- if (!user_mode(regs)) {
28694+ if (!user_mode_novm(regs)) {
28695 if (fixup_exception(regs))
28696 goto exit;
28697
28698 tsk->thread.error_code = error_code;
28699 tsk->thread.trap_nr = X86_TRAP_GP;
28700 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28701- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28702+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28703+
28704+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28705+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28706+ die("PAX: suspicious general protection fault", regs, error_code);
28707+ else
28708+#endif
28709+
28710 die("general protection fault", regs, error_code);
28711+ }
28712 goto exit;
28713 }
28714
28715+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28716+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28717+ struct mm_struct *mm = tsk->mm;
28718+ unsigned long limit;
28719+
28720+ down_write(&mm->mmap_sem);
28721+ limit = mm->context.user_cs_limit;
28722+ if (limit < TASK_SIZE) {
28723+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28724+ up_write(&mm->mmap_sem);
28725+ return;
28726+ }
28727+ up_write(&mm->mmap_sem);
28728+ }
28729+#endif
28730+
28731 tsk->thread.error_code = error_code;
28732 tsk->thread.trap_nr = X86_TRAP_GP;
28733
28734@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28735 /* It's safe to allow irq's after DR6 has been saved */
28736 preempt_conditional_sti(regs);
28737
28738- if (regs->flags & X86_VM_MASK) {
28739+ if (v8086_mode(regs)) {
28740 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28741 X86_TRAP_DB);
28742 preempt_conditional_cli(regs);
28743@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28744 * We already checked v86 mode above, so we can check for kernel mode
28745 * by just checking the CPL of CS.
28746 */
28747- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28748+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28749 tsk->thread.debugreg6 &= ~DR_STEP;
28750 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28751 regs->flags &= ~X86_EFLAGS_TF;
28752@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28753 return;
28754 conditional_sti(regs);
28755
28756- if (!user_mode_vm(regs))
28757+ if (!user_mode(regs))
28758 {
28759 if (!fixup_exception(regs)) {
28760 task->thread.error_code = error_code;
28761diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28762index b6025f9..0cc6a1d 100644
28763--- a/arch/x86/kernel/tsc.c
28764+++ b/arch/x86/kernel/tsc.c
28765@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28766 */
28767 smp_wmb();
28768
28769- ACCESS_ONCE(c2n->head) = data;
28770+ ACCESS_ONCE_RW(c2n->head) = data;
28771 }
28772
28773 /*
28774diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28775index 5d1cbfe..2a21feb 100644
28776--- a/arch/x86/kernel/uprobes.c
28777+++ b/arch/x86/kernel/uprobes.c
28778@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28779 int ret = NOTIFY_DONE;
28780
28781 /* We are only interested in userspace traps */
28782- if (regs && !user_mode_vm(regs))
28783+ if (regs && !user_mode(regs))
28784 return NOTIFY_DONE;
28785
28786 switch (val) {
28787@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28788
28789 if (nleft != rasize) {
28790 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28791- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28792+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28793
28794 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28795 }
28796diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28797index b9242ba..50c5edd 100644
28798--- a/arch/x86/kernel/verify_cpu.S
28799+++ b/arch/x86/kernel/verify_cpu.S
28800@@ -20,6 +20,7 @@
28801 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28802 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28803 * arch/x86/kernel/head_32.S: processor startup
28804+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28805 *
28806 * verify_cpu, returns the status of longmode and SSE in register %eax.
28807 * 0: Success 1: Failure
28808diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28809index e8edcf5..27f9344 100644
28810--- a/arch/x86/kernel/vm86_32.c
28811+++ b/arch/x86/kernel/vm86_32.c
28812@@ -44,6 +44,7 @@
28813 #include <linux/ptrace.h>
28814 #include <linux/audit.h>
28815 #include <linux/stddef.h>
28816+#include <linux/grsecurity.h>
28817
28818 #include <asm/uaccess.h>
28819 #include <asm/io.h>
28820@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28821 do_exit(SIGSEGV);
28822 }
28823
28824- tss = &per_cpu(init_tss, get_cpu());
28825+ tss = init_tss + get_cpu();
28826 current->thread.sp0 = current->thread.saved_sp0;
28827 current->thread.sysenter_cs = __KERNEL_CS;
28828 load_sp0(tss, &current->thread);
28829@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28830
28831 if (tsk->thread.saved_sp0)
28832 return -EPERM;
28833+
28834+#ifdef CONFIG_GRKERNSEC_VM86
28835+ if (!capable(CAP_SYS_RAWIO)) {
28836+ gr_handle_vm86();
28837+ return -EPERM;
28838+ }
28839+#endif
28840+
28841 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28842 offsetof(struct kernel_vm86_struct, vm86plus) -
28843 sizeof(info.regs));
28844@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28845 int tmp;
28846 struct vm86plus_struct __user *v86;
28847
28848+#ifdef CONFIG_GRKERNSEC_VM86
28849+ if (!capable(CAP_SYS_RAWIO)) {
28850+ gr_handle_vm86();
28851+ return -EPERM;
28852+ }
28853+#endif
28854+
28855 tsk = current;
28856 switch (cmd) {
28857 case VM86_REQUEST_IRQ:
28858@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28859 tsk->thread.saved_fs = info->regs32->fs;
28860 tsk->thread.saved_gs = get_user_gs(info->regs32);
28861
28862- tss = &per_cpu(init_tss, get_cpu());
28863+ tss = init_tss + get_cpu();
28864 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28865 if (cpu_has_sep)
28866 tsk->thread.sysenter_cs = 0;
28867@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28868 goto cannot_handle;
28869 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28870 goto cannot_handle;
28871- intr_ptr = (unsigned long __user *) (i << 2);
28872+ intr_ptr = (__force unsigned long __user *) (i << 2);
28873 if (get_user(segoffs, intr_ptr))
28874 goto cannot_handle;
28875 if ((segoffs >> 16) == BIOSSEG)
28876diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28877index 49edf2d..c0d1362 100644
28878--- a/arch/x86/kernel/vmlinux.lds.S
28879+++ b/arch/x86/kernel/vmlinux.lds.S
28880@@ -26,6 +26,13 @@
28881 #include <asm/page_types.h>
28882 #include <asm/cache.h>
28883 #include <asm/boot.h>
28884+#include <asm/segment.h>
28885+
28886+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28887+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28888+#else
28889+#define __KERNEL_TEXT_OFFSET 0
28890+#endif
28891
28892 #undef i386 /* in case the preprocessor is a 32bit one */
28893
28894@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28895
28896 PHDRS {
28897 text PT_LOAD FLAGS(5); /* R_E */
28898+#ifdef CONFIG_X86_32
28899+ module PT_LOAD FLAGS(5); /* R_E */
28900+#endif
28901+#ifdef CONFIG_XEN
28902+ rodata PT_LOAD FLAGS(5); /* R_E */
28903+#else
28904+ rodata PT_LOAD FLAGS(4); /* R__ */
28905+#endif
28906 data PT_LOAD FLAGS(6); /* RW_ */
28907-#ifdef CONFIG_X86_64
28908+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28909 #ifdef CONFIG_SMP
28910 percpu PT_LOAD FLAGS(6); /* RW_ */
28911 #endif
28912+ text.init PT_LOAD FLAGS(5); /* R_E */
28913+ text.exit PT_LOAD FLAGS(5); /* R_E */
28914 init PT_LOAD FLAGS(7); /* RWE */
28915-#endif
28916 note PT_NOTE FLAGS(0); /* ___ */
28917 }
28918
28919 SECTIONS
28920 {
28921 #ifdef CONFIG_X86_32
28922- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28923- phys_startup_32 = startup_32 - LOAD_OFFSET;
28924+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28925 #else
28926- . = __START_KERNEL;
28927- phys_startup_64 = startup_64 - LOAD_OFFSET;
28928+ . = __START_KERNEL;
28929 #endif
28930
28931 /* Text and read-only data */
28932- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28933- _text = .;
28934+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28935 /* bootstrapping code */
28936+#ifdef CONFIG_X86_32
28937+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28938+#else
28939+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28940+#endif
28941+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28942+ _text = .;
28943 HEAD_TEXT
28944 . = ALIGN(8);
28945 _stext = .;
28946@@ -104,13 +124,47 @@ SECTIONS
28947 IRQENTRY_TEXT
28948 *(.fixup)
28949 *(.gnu.warning)
28950- /* End of text section */
28951- _etext = .;
28952 } :text = 0x9090
28953
28954- NOTES :text :note
28955+ . += __KERNEL_TEXT_OFFSET;
28956
28957- EXCEPTION_TABLE(16) :text = 0x9090
28958+#ifdef CONFIG_X86_32
28959+ . = ALIGN(PAGE_SIZE);
28960+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28961+
28962+#ifdef CONFIG_PAX_KERNEXEC
28963+ MODULES_EXEC_VADDR = .;
28964+ BYTE(0)
28965+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28966+ . = ALIGN(HPAGE_SIZE) - 1;
28967+ MODULES_EXEC_END = .;
28968+#endif
28969+
28970+ } :module
28971+#endif
28972+
28973+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28974+ /* End of text section */
28975+ BYTE(0)
28976+ _etext = . - __KERNEL_TEXT_OFFSET;
28977+ }
28978+
28979+#ifdef CONFIG_X86_32
28980+ . = ALIGN(PAGE_SIZE);
28981+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28982+ . = ALIGN(PAGE_SIZE);
28983+ *(.empty_zero_page)
28984+ *(.initial_pg_fixmap)
28985+ *(.initial_pg_pmd)
28986+ *(.initial_page_table)
28987+ *(.swapper_pg_dir)
28988+ } :rodata
28989+#endif
28990+
28991+ . = ALIGN(PAGE_SIZE);
28992+ NOTES :rodata :note
28993+
28994+ EXCEPTION_TABLE(16) :rodata
28995
28996 #if defined(CONFIG_DEBUG_RODATA)
28997 /* .text should occupy whole number of pages */
28998@@ -122,16 +176,20 @@ SECTIONS
28999
29000 /* Data */
29001 .data : AT(ADDR(.data) - LOAD_OFFSET) {
29002+
29003+#ifdef CONFIG_PAX_KERNEXEC
29004+ . = ALIGN(HPAGE_SIZE);
29005+#else
29006+ . = ALIGN(PAGE_SIZE);
29007+#endif
29008+
29009 /* Start of data section */
29010 _sdata = .;
29011
29012 /* init_task */
29013 INIT_TASK_DATA(THREAD_SIZE)
29014
29015-#ifdef CONFIG_X86_32
29016- /* 32 bit has nosave before _edata */
29017 NOSAVE_DATA
29018-#endif
29019
29020 PAGE_ALIGNED_DATA(PAGE_SIZE)
29021
29022@@ -174,12 +232,19 @@ SECTIONS
29023 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
29024
29025 /* Init code and data - will be freed after init */
29026- . = ALIGN(PAGE_SIZE);
29027 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
29028+ BYTE(0)
29029+
29030+#ifdef CONFIG_PAX_KERNEXEC
29031+ . = ALIGN(HPAGE_SIZE);
29032+#else
29033+ . = ALIGN(PAGE_SIZE);
29034+#endif
29035+
29036 __init_begin = .; /* paired with __init_end */
29037- }
29038+ } :init.begin
29039
29040-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
29041+#ifdef CONFIG_SMP
29042 /*
29043 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
29044 * output PHDR, so the next output section - .init.text - should
29045@@ -188,12 +253,27 @@ SECTIONS
29046 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
29047 #endif
29048
29049- INIT_TEXT_SECTION(PAGE_SIZE)
29050-#ifdef CONFIG_X86_64
29051- :init
29052-#endif
29053+ . = ALIGN(PAGE_SIZE);
29054+ init_begin = .;
29055+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
29056+ VMLINUX_SYMBOL(_sinittext) = .;
29057+ INIT_TEXT
29058+ VMLINUX_SYMBOL(_einittext) = .;
29059+ . = ALIGN(PAGE_SIZE);
29060+ } :text.init
29061
29062- INIT_DATA_SECTION(16)
29063+ /*
29064+ * .exit.text is discard at runtime, not link time, to deal with
29065+ * references from .altinstructions and .eh_frame
29066+ */
29067+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
29068+ EXIT_TEXT
29069+ . = ALIGN(16);
29070+ } :text.exit
29071+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
29072+
29073+ . = ALIGN(PAGE_SIZE);
29074+ INIT_DATA_SECTION(16) :init
29075
29076 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
29077 __x86_cpu_dev_start = .;
29078@@ -264,19 +344,12 @@ SECTIONS
29079 }
29080
29081 . = ALIGN(8);
29082- /*
29083- * .exit.text is discard at runtime, not link time, to deal with
29084- * references from .altinstructions and .eh_frame
29085- */
29086- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
29087- EXIT_TEXT
29088- }
29089
29090 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
29091 EXIT_DATA
29092 }
29093
29094-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
29095+#ifndef CONFIG_SMP
29096 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
29097 #endif
29098
29099@@ -295,16 +368,10 @@ SECTIONS
29100 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
29101 __smp_locks = .;
29102 *(.smp_locks)
29103- . = ALIGN(PAGE_SIZE);
29104 __smp_locks_end = .;
29105+ . = ALIGN(PAGE_SIZE);
29106 }
29107
29108-#ifdef CONFIG_X86_64
29109- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
29110- NOSAVE_DATA
29111- }
29112-#endif
29113-
29114 /* BSS */
29115 . = ALIGN(PAGE_SIZE);
29116 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
29117@@ -320,6 +387,7 @@ SECTIONS
29118 __brk_base = .;
29119 . += 64 * 1024; /* 64k alignment slop space */
29120 *(.brk_reservation) /* areas brk users have reserved */
29121+ . = ALIGN(HPAGE_SIZE);
29122 __brk_limit = .;
29123 }
29124
29125@@ -346,13 +414,12 @@ SECTIONS
29126 * for the boot processor.
29127 */
29128 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
29129-INIT_PER_CPU(gdt_page);
29130 INIT_PER_CPU(irq_stack_union);
29131
29132 /*
29133 * Build-time check on the image size:
29134 */
29135-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
29136+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
29137 "kernel image bigger than KERNEL_IMAGE_SIZE");
29138
29139 #ifdef CONFIG_SMP
29140diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
29141index e1e1e80..1400089 100644
29142--- a/arch/x86/kernel/vsyscall_64.c
29143+++ b/arch/x86/kernel/vsyscall_64.c
29144@@ -54,15 +54,13 @@
29145
29146 DEFINE_VVAR(int, vgetcpu_mode);
29147
29148-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
29149+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
29150
29151 static int __init vsyscall_setup(char *str)
29152 {
29153 if (str) {
29154 if (!strcmp("emulate", str))
29155 vsyscall_mode = EMULATE;
29156- else if (!strcmp("native", str))
29157- vsyscall_mode = NATIVE;
29158 else if (!strcmp("none", str))
29159 vsyscall_mode = NONE;
29160 else
29161@@ -279,8 +277,7 @@ do_ret:
29162 return true;
29163
29164 sigsegv:
29165- force_sig(SIGSEGV, current);
29166- return true;
29167+ do_group_exit(SIGKILL);
29168 }
29169
29170 /*
29171@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
29172 extern char __vsyscall_page;
29173 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
29174
29175- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
29176- vsyscall_mode == NATIVE
29177- ? PAGE_KERNEL_VSYSCALL
29178- : PAGE_KERNEL_VVAR);
29179+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
29180 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
29181 (unsigned long)VSYSCALL_ADDR);
29182 }
29183diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
29184index 04068192..4d75aa6 100644
29185--- a/arch/x86/kernel/x8664_ksyms_64.c
29186+++ b/arch/x86/kernel/x8664_ksyms_64.c
29187@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
29188 EXPORT_SYMBOL(copy_user_generic_unrolled);
29189 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
29190 EXPORT_SYMBOL(__copy_user_nocache);
29191-EXPORT_SYMBOL(_copy_from_user);
29192-EXPORT_SYMBOL(_copy_to_user);
29193
29194 EXPORT_SYMBOL(copy_page);
29195 EXPORT_SYMBOL(clear_page);
29196@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
29197 EXPORT_SYMBOL(___preempt_schedule_context);
29198 #endif
29199 #endif
29200+
29201+#ifdef CONFIG_PAX_PER_CPU_PGD
29202+EXPORT_SYMBOL(cpu_pgd);
29203+#endif
29204diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
29205index e48b674..a451dd9 100644
29206--- a/arch/x86/kernel/x86_init.c
29207+++ b/arch/x86/kernel/x86_init.c
29208@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
29209 static void default_nmi_init(void) { };
29210 static int default_i8042_detect(void) { return 1; };
29211
29212-struct x86_platform_ops x86_platform = {
29213+struct x86_platform_ops x86_platform __read_only = {
29214 .calibrate_tsc = native_calibrate_tsc,
29215 .get_wallclock = mach_get_cmos_time,
29216 .set_wallclock = mach_set_rtc_mmss,
29217@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
29218 EXPORT_SYMBOL_GPL(x86_platform);
29219
29220 #if defined(CONFIG_PCI_MSI)
29221-struct x86_msi_ops x86_msi = {
29222+struct x86_msi_ops x86_msi __read_only = {
29223 .setup_msi_irqs = native_setup_msi_irqs,
29224 .compose_msi_msg = native_compose_msi_msg,
29225 .teardown_msi_irq = native_teardown_msi_irq,
29226@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
29227 }
29228 #endif
29229
29230-struct x86_io_apic_ops x86_io_apic_ops = {
29231+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
29232 .init = native_io_apic_init_mappings,
29233 .read = native_io_apic_read,
29234 .write = native_io_apic_write,
29235diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
29236index 940b142..0ad3a10 100644
29237--- a/arch/x86/kernel/xsave.c
29238+++ b/arch/x86/kernel/xsave.c
29239@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29240
29241 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
29242 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
29243- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29244+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29245
29246 if (!use_xsave())
29247 return err;
29248
29249- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
29250+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
29251
29252 /*
29253 * Read the xstate_bv which we copied (directly from the cpu or
29254 * from the state in task struct) to the user buffers.
29255 */
29256- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29257+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29258
29259 /*
29260 * For legacy compatible, we always set FP/SSE bits in the bit
29261@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29262 */
29263 xstate_bv |= XSTATE_FPSSE;
29264
29265- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29266+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29267
29268 return err;
29269 }
29270@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
29271 {
29272 int err;
29273
29274+ buf = (struct xsave_struct __user *)____m(buf);
29275 if (use_xsave())
29276 err = xsave_user(buf);
29277 else if (use_fxsr())
29278@@ -314,6 +315,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
29279 */
29280 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
29281 {
29282+ buf = (void __user *)____m(buf);
29283 if (use_xsave()) {
29284 if ((unsigned long)buf % 64 || fx_only) {
29285 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
29286diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
29287index 38a0afe..94421a9 100644
29288--- a/arch/x86/kvm/cpuid.c
29289+++ b/arch/x86/kvm/cpuid.c
29290@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
29291 struct kvm_cpuid2 *cpuid,
29292 struct kvm_cpuid_entry2 __user *entries)
29293 {
29294- int r;
29295+ int r, i;
29296
29297 r = -E2BIG;
29298 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
29299 goto out;
29300 r = -EFAULT;
29301- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
29302- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29303+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29304 goto out;
29305+ for (i = 0; i < cpuid->nent; ++i) {
29306+ struct kvm_cpuid_entry2 cpuid_entry;
29307+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
29308+ goto out;
29309+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
29310+ }
29311 vcpu->arch.cpuid_nent = cpuid->nent;
29312 kvm_apic_set_version(vcpu);
29313 kvm_x86_ops->cpuid_update(vcpu);
29314@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
29315 struct kvm_cpuid2 *cpuid,
29316 struct kvm_cpuid_entry2 __user *entries)
29317 {
29318- int r;
29319+ int r, i;
29320
29321 r = -E2BIG;
29322 if (cpuid->nent < vcpu->arch.cpuid_nent)
29323 goto out;
29324 r = -EFAULT;
29325- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
29326- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29327+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29328 goto out;
29329+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
29330+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
29331+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
29332+ goto out;
29333+ }
29334 return 0;
29335
29336 out:
29337diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
29338index 03954f7..48daa1a 100644
29339--- a/arch/x86/kvm/emulate.c
29340+++ b/arch/x86/kvm/emulate.c
29341@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
29342 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
29343 }
29344
29345-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
29346-{
29347- register_address_increment(ctxt, &ctxt->_eip, rel);
29348-}
29349-
29350 static u32 desc_limit_scaled(struct desc_struct *desc)
29351 {
29352 u32 limit = get_desc_limit(desc);
29353@@ -568,6 +563,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
29354 return emulate_exception(ctxt, NM_VECTOR, 0, false);
29355 }
29356
29357+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
29358+ int cs_l)
29359+{
29360+ switch (ctxt->op_bytes) {
29361+ case 2:
29362+ ctxt->_eip = (u16)dst;
29363+ break;
29364+ case 4:
29365+ ctxt->_eip = (u32)dst;
29366+ break;
29367+ case 8:
29368+ if ((cs_l && is_noncanonical_address(dst)) ||
29369+ (!cs_l && (dst & ~(u32)-1)))
29370+ return emulate_gp(ctxt, 0);
29371+ ctxt->_eip = dst;
29372+ break;
29373+ default:
29374+ WARN(1, "unsupported eip assignment size\n");
29375+ }
29376+ return X86EMUL_CONTINUE;
29377+}
29378+
29379+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
29380+{
29381+ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
29382+}
29383+
29384+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
29385+{
29386+ return assign_eip_near(ctxt, ctxt->_eip + rel);
29387+}
29388+
29389 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
29390 {
29391 u16 selector;
29392@@ -750,8 +777,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
29393 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
29394 unsigned size)
29395 {
29396- if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
29397- return __do_insn_fetch_bytes(ctxt, size);
29398+ unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
29399+
29400+ if (unlikely(done_size < size))
29401+ return __do_insn_fetch_bytes(ctxt, size - done_size);
29402 else
29403 return X86EMUL_CONTINUE;
29404 }
29405@@ -1415,7 +1444,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29406
29407 /* Does not support long mode */
29408 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29409- u16 selector, int seg, u8 cpl, bool in_task_switch)
29410+ u16 selector, int seg, u8 cpl,
29411+ bool in_task_switch,
29412+ struct desc_struct *desc)
29413 {
29414 struct desc_struct seg_desc, old_desc;
29415 u8 dpl, rpl;
29416@@ -1547,6 +1578,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29417 }
29418 load:
29419 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
29420+ if (desc)
29421+ *desc = seg_desc;
29422 return X86EMUL_CONTINUE;
29423 exception:
29424 emulate_exception(ctxt, err_vec, err_code, true);
29425@@ -1557,7 +1590,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29426 u16 selector, int seg)
29427 {
29428 u8 cpl = ctxt->ops->cpl(ctxt);
29429- return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
29430+ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
29431 }
29432
29433 static void write_register_operand(struct operand *op)
29434@@ -1951,17 +1984,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
29435 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
29436 {
29437 int rc;
29438- unsigned short sel;
29439+ unsigned short sel, old_sel;
29440+ struct desc_struct old_desc, new_desc;
29441+ const struct x86_emulate_ops *ops = ctxt->ops;
29442+ u8 cpl = ctxt->ops->cpl(ctxt);
29443+
29444+ /* Assignment of RIP may only fail in 64-bit mode */
29445+ if (ctxt->mode == X86EMUL_MODE_PROT64)
29446+ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
29447+ VCPU_SREG_CS);
29448
29449 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
29450
29451- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
29452+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
29453+ &new_desc);
29454 if (rc != X86EMUL_CONTINUE)
29455 return rc;
29456
29457- ctxt->_eip = 0;
29458- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
29459- return X86EMUL_CONTINUE;
29460+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
29461+ if (rc != X86EMUL_CONTINUE) {
29462+ WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
29463+ /* assigning eip failed; restore the old cs */
29464+ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
29465+ return rc;
29466+ }
29467+ return rc;
29468 }
29469
29470 static int em_grp45(struct x86_emulate_ctxt *ctxt)
29471@@ -1972,13 +2019,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
29472 case 2: /* call near abs */ {
29473 long int old_eip;
29474 old_eip = ctxt->_eip;
29475- ctxt->_eip = ctxt->src.val;
29476+ rc = assign_eip_near(ctxt, ctxt->src.val);
29477+ if (rc != X86EMUL_CONTINUE)
29478+ break;
29479 ctxt->src.val = old_eip;
29480 rc = em_push(ctxt);
29481 break;
29482 }
29483 case 4: /* jmp abs */
29484- ctxt->_eip = ctxt->src.val;
29485+ rc = assign_eip_near(ctxt, ctxt->src.val);
29486 break;
29487 case 5: /* jmp far */
29488 rc = em_jmp_far(ctxt);
29489@@ -2013,30 +2062,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
29490
29491 static int em_ret(struct x86_emulate_ctxt *ctxt)
29492 {
29493- ctxt->dst.type = OP_REG;
29494- ctxt->dst.addr.reg = &ctxt->_eip;
29495- ctxt->dst.bytes = ctxt->op_bytes;
29496- return em_pop(ctxt);
29497+ int rc;
29498+ unsigned long eip;
29499+
29500+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29501+ if (rc != X86EMUL_CONTINUE)
29502+ return rc;
29503+
29504+ return assign_eip_near(ctxt, eip);
29505 }
29506
29507 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
29508 {
29509 int rc;
29510- unsigned long cs;
29511+ unsigned long eip, cs;
29512+ u16 old_cs;
29513 int cpl = ctxt->ops->cpl(ctxt);
29514+ struct desc_struct old_desc, new_desc;
29515+ const struct x86_emulate_ops *ops = ctxt->ops;
29516
29517- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
29518+ if (ctxt->mode == X86EMUL_MODE_PROT64)
29519+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
29520+ VCPU_SREG_CS);
29521+
29522+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29523 if (rc != X86EMUL_CONTINUE)
29524 return rc;
29525- if (ctxt->op_bytes == 4)
29526- ctxt->_eip = (u32)ctxt->_eip;
29527 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
29528 if (rc != X86EMUL_CONTINUE)
29529 return rc;
29530 /* Outer-privilege level return is not implemented */
29531 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
29532 return X86EMUL_UNHANDLEABLE;
29533- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
29534+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
29535+ &new_desc);
29536+ if (rc != X86EMUL_CONTINUE)
29537+ return rc;
29538+ rc = assign_eip_far(ctxt, eip, new_desc.l);
29539+ if (rc != X86EMUL_CONTINUE) {
29540+ WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
29541+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
29542+ }
29543 return rc;
29544 }
29545
29546@@ -2297,7 +2363,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29547 {
29548 const struct x86_emulate_ops *ops = ctxt->ops;
29549 struct desc_struct cs, ss;
29550- u64 msr_data;
29551+ u64 msr_data, rcx, rdx;
29552 int usermode;
29553 u16 cs_sel = 0, ss_sel = 0;
29554
29555@@ -2313,6 +2379,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29556 else
29557 usermode = X86EMUL_MODE_PROT32;
29558
29559+ rcx = reg_read(ctxt, VCPU_REGS_RCX);
29560+ rdx = reg_read(ctxt, VCPU_REGS_RDX);
29561+
29562 cs.dpl = 3;
29563 ss.dpl = 3;
29564 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
29565@@ -2330,6 +2399,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29566 ss_sel = cs_sel + 8;
29567 cs.d = 0;
29568 cs.l = 1;
29569+ if (is_noncanonical_address(rcx) ||
29570+ is_noncanonical_address(rdx))
29571+ return emulate_gp(ctxt, 0);
29572 break;
29573 }
29574 cs_sel |= SELECTOR_RPL_MASK;
29575@@ -2338,8 +2410,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29576 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
29577 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
29578
29579- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
29580- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
29581+ ctxt->_eip = rdx;
29582+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
29583
29584 return X86EMUL_CONTINUE;
29585 }
29586@@ -2457,19 +2529,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
29587 * Now load segment descriptors. If fault happens at this stage
29588 * it is handled in a context of new task
29589 */
29590- ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
29591+ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
29592+ true, NULL);
29593 if (ret != X86EMUL_CONTINUE)
29594 return ret;
29595- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
29596+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
29597+ true, NULL);
29598 if (ret != X86EMUL_CONTINUE)
29599 return ret;
29600- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
29601+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
29602+ true, NULL);
29603 if (ret != X86EMUL_CONTINUE)
29604 return ret;
29605- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
29606+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
29607+ true, NULL);
29608 if (ret != X86EMUL_CONTINUE)
29609 return ret;
29610- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
29611+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
29612+ true, NULL);
29613 if (ret != X86EMUL_CONTINUE)
29614 return ret;
29615
29616@@ -2594,25 +2671,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
29617 * Now load segment descriptors. If fault happenes at this stage
29618 * it is handled in a context of new task
29619 */
29620- ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
29621+ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
29622+ cpl, true, NULL);
29623 if (ret != X86EMUL_CONTINUE)
29624 return ret;
29625- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
29626+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
29627+ true, NULL);
29628 if (ret != X86EMUL_CONTINUE)
29629 return ret;
29630- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
29631+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
29632+ true, NULL);
29633 if (ret != X86EMUL_CONTINUE)
29634 return ret;
29635- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
29636+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
29637+ true, NULL);
29638 if (ret != X86EMUL_CONTINUE)
29639 return ret;
29640- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
29641+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
29642+ true, NULL);
29643 if (ret != X86EMUL_CONTINUE)
29644 return ret;
29645- ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
29646+ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
29647+ true, NULL);
29648 if (ret != X86EMUL_CONTINUE)
29649 return ret;
29650- ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
29651+ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
29652+ true, NULL);
29653 if (ret != X86EMUL_CONTINUE)
29654 return ret;
29655
29656@@ -2880,10 +2964,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
29657
29658 static int em_call(struct x86_emulate_ctxt *ctxt)
29659 {
29660+ int rc;
29661 long rel = ctxt->src.val;
29662
29663 ctxt->src.val = (unsigned long)ctxt->_eip;
29664- jmp_rel(ctxt, rel);
29665+ rc = jmp_rel(ctxt, rel);
29666+ if (rc != X86EMUL_CONTINUE)
29667+ return rc;
29668 return em_push(ctxt);
29669 }
29670
29671@@ -2892,34 +2979,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
29672 u16 sel, old_cs;
29673 ulong old_eip;
29674 int rc;
29675+ struct desc_struct old_desc, new_desc;
29676+ const struct x86_emulate_ops *ops = ctxt->ops;
29677+ int cpl = ctxt->ops->cpl(ctxt);
29678
29679- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
29680 old_eip = ctxt->_eip;
29681+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
29682
29683 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
29684- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
29685+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
29686+ &new_desc);
29687+ if (rc != X86EMUL_CONTINUE)
29688 return X86EMUL_CONTINUE;
29689
29690- ctxt->_eip = 0;
29691- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
29692+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
29693+ if (rc != X86EMUL_CONTINUE)
29694+ goto fail;
29695
29696 ctxt->src.val = old_cs;
29697 rc = em_push(ctxt);
29698 if (rc != X86EMUL_CONTINUE)
29699- return rc;
29700+ goto fail;
29701
29702 ctxt->src.val = old_eip;
29703- return em_push(ctxt);
29704+ rc = em_push(ctxt);
29705+ /* If we failed, we tainted the memory, but the very least we should
29706+ restore cs */
29707+ if (rc != X86EMUL_CONTINUE)
29708+ goto fail;
29709+ return rc;
29710+fail:
29711+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
29712+ return rc;
29713+
29714 }
29715
29716 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
29717 {
29718 int rc;
29719+ unsigned long eip;
29720
29721- ctxt->dst.type = OP_REG;
29722- ctxt->dst.addr.reg = &ctxt->_eip;
29723- ctxt->dst.bytes = ctxt->op_bytes;
29724- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
29725+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29726+ if (rc != X86EMUL_CONTINUE)
29727+ return rc;
29728+ rc = assign_eip_near(ctxt, eip);
29729 if (rc != X86EMUL_CONTINUE)
29730 return rc;
29731 rsp_increment(ctxt, ctxt->src.val);
29732@@ -3250,20 +3353,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
29733
29734 static int em_loop(struct x86_emulate_ctxt *ctxt)
29735 {
29736+ int rc = X86EMUL_CONTINUE;
29737+
29738 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
29739 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
29740 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
29741- jmp_rel(ctxt, ctxt->src.val);
29742+ rc = jmp_rel(ctxt, ctxt->src.val);
29743
29744- return X86EMUL_CONTINUE;
29745+ return rc;
29746 }
29747
29748 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
29749 {
29750+ int rc = X86EMUL_CONTINUE;
29751+
29752 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
29753- jmp_rel(ctxt, ctxt->src.val);
29754+ rc = jmp_rel(ctxt, ctxt->src.val);
29755
29756- return X86EMUL_CONTINUE;
29757+ return rc;
29758 }
29759
29760 static int em_in(struct x86_emulate_ctxt *ctxt)
29761@@ -3351,6 +3458,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
29762 return X86EMUL_CONTINUE;
29763 }
29764
29765+static int em_clflush(struct x86_emulate_ctxt *ctxt)
29766+{
29767+ /* emulating clflush regardless of cpuid */
29768+ return X86EMUL_CONTINUE;
29769+}
29770+
29771 static bool valid_cr(int nr)
29772 {
29773 switch (nr) {
29774@@ -3683,6 +3796,16 @@ static const struct opcode group11[] = {
29775 X7(D(Undefined)),
29776 };
29777
29778+static const struct gprefix pfx_0f_ae_7 = {
29779+ I(SrcMem | ByteOp, em_clflush), N, N, N,
29780+};
29781+
29782+static const struct group_dual group15 = { {
29783+ N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
29784+}, {
29785+ N, N, N, N, N, N, N, N,
29786+} };
29787+
29788 static const struct gprefix pfx_0f_6f_0f_7f = {
29789 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
29790 };
29791@@ -3887,10 +4010,11 @@ static const struct opcode twobyte_table[256] = {
29792 N, I(ImplicitOps | EmulateOnUD, em_syscall),
29793 II(ImplicitOps | Priv, em_clts, clts), N,
29794 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
29795- N, D(ImplicitOps | ModRM), N, N,
29796+ N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
29797 /* 0x10 - 0x1F */
29798 N, N, N, N, N, N, N, N,
29799- D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
29800+ D(ImplicitOps | ModRM | SrcMem | NoAccess),
29801+ N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
29802 /* 0x20 - 0x2F */
29803 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
29804 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
29805@@ -3942,7 +4066,7 @@ static const struct opcode twobyte_table[256] = {
29806 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
29807 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
29808 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
29809- D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
29810+ GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
29811 /* 0xB0 - 0xB7 */
29812 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
29813 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
29814@@ -4458,10 +4582,10 @@ done_prefixes:
29815 /* Decode and fetch the destination operand: register or memory. */
29816 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
29817
29818-done:
29819 if (ctxt->rip_relative)
29820 ctxt->memopp->addr.mem.ea += ctxt->_eip;
29821
29822+done:
29823 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
29824 }
29825
29826@@ -4711,7 +4835,7 @@ special_insn:
29827 break;
29828 case 0x70 ... 0x7f: /* jcc (short) */
29829 if (test_cc(ctxt->b, ctxt->eflags))
29830- jmp_rel(ctxt, ctxt->src.val);
29831+ rc = jmp_rel(ctxt, ctxt->src.val);
29832 break;
29833 case 0x8d: /* lea r16/r32, m */
29834 ctxt->dst.val = ctxt->src.addr.mem.ea;
29835@@ -4741,7 +4865,7 @@ special_insn:
29836 break;
29837 case 0xe9: /* jmp rel */
29838 case 0xeb: /* jmp rel short */
29839- jmp_rel(ctxt, ctxt->src.val);
29840+ rc = jmp_rel(ctxt, ctxt->src.val);
29841 ctxt->dst.type = OP_NONE; /* Disable writeback. */
29842 break;
29843 case 0xf4: /* hlt */
29844@@ -4864,13 +4988,11 @@ twobyte_insn:
29845 break;
29846 case 0x80 ... 0x8f: /* jnz rel, etc*/
29847 if (test_cc(ctxt->b, ctxt->eflags))
29848- jmp_rel(ctxt, ctxt->src.val);
29849+ rc = jmp_rel(ctxt, ctxt->src.val);
29850 break;
29851 case 0x90 ... 0x9f: /* setcc r/m8 */
29852 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
29853 break;
29854- case 0xae: /* clflush */
29855- break;
29856 case 0xb6 ... 0xb7: /* movzx */
29857 ctxt->dst.bytes = ctxt->op_bytes;
29858 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
29859diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
29860index 518d864..298781d 100644
29861--- a/arch/x86/kvm/i8254.c
29862+++ b/arch/x86/kvm/i8254.c
29863@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
29864 return;
29865
29866 timer = &pit->pit_state.timer;
29867+ mutex_lock(&pit->pit_state.lock);
29868 if (hrtimer_cancel(timer))
29869 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
29870+ mutex_unlock(&pit->pit_state.lock);
29871 }
29872
29873 static void destroy_pit_timer(struct kvm_pit *pit)
29874diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
29875index 08e8a89..0e9183e 100644
29876--- a/arch/x86/kvm/lapic.c
29877+++ b/arch/x86/kvm/lapic.c
29878@@ -55,7 +55,7 @@
29879 #define APIC_BUS_CYCLE_NS 1
29880
29881 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
29882-#define apic_debug(fmt, arg...)
29883+#define apic_debug(fmt, arg...) do {} while (0)
29884
29885 #define APIC_LVT_NUM 6
29886 /* 14 is the version for Xeon and Pentium 8.4.8*/
29887diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
29888index 4107765..d9eb358 100644
29889--- a/arch/x86/kvm/paging_tmpl.h
29890+++ b/arch/x86/kvm/paging_tmpl.h
29891@@ -331,7 +331,7 @@ retry_walk:
29892 if (unlikely(kvm_is_error_hva(host_addr)))
29893 goto error;
29894
29895- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
29896+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
29897 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
29898 goto error;
29899 walker->ptep_user[walker->level - 1] = ptep_user;
29900diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
29901index ddf7427..fd84599 100644
29902--- a/arch/x86/kvm/svm.c
29903+++ b/arch/x86/kvm/svm.c
29904@@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
29905 msr.host_initiated = false;
29906
29907 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
29908- if (svm_set_msr(&svm->vcpu, &msr)) {
29909+ if (kvm_set_msr(&svm->vcpu, &msr)) {
29910 trace_kvm_msr_write_ex(ecx, data);
29911 kvm_inject_gp(&svm->vcpu, 0);
29912 } else {
29913@@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
29914
29915 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
29916 || !svm_exit_handlers[exit_code]) {
29917- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
29918- kvm_run->hw.hardware_exit_reason = exit_code;
29919- return 0;
29920+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
29921+ kvm_queue_exception(vcpu, UD_VECTOR);
29922+ return 1;
29923 }
29924
29925 return svm_exit_handlers[exit_code](svm);
29926@@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
29927 int cpu = raw_smp_processor_id();
29928
29929 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
29930+
29931+ pax_open_kernel();
29932 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
29933+ pax_close_kernel();
29934+
29935 load_TR_desc();
29936 }
29937
29938@@ -3948,6 +3952,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
29939 #endif
29940 #endif
29941
29942+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29943+ __set_fs(current_thread_info()->addr_limit);
29944+#endif
29945+
29946 reload_tss(vcpu);
29947
29948 local_irq_disable();
29949diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
29950index 6a118fa..c0b3c00 100644
29951--- a/arch/x86/kvm/vmx.c
29952+++ b/arch/x86/kvm/vmx.c
29953@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
29954 #endif
29955 }
29956
29957-static void vmcs_clear_bits(unsigned long field, u32 mask)
29958+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
29959 {
29960 vmcs_writel(field, vmcs_readl(field) & ~mask);
29961 }
29962
29963-static void vmcs_set_bits(unsigned long field, u32 mask)
29964+static void vmcs_set_bits(unsigned long field, unsigned long mask)
29965 {
29966 vmcs_writel(field, vmcs_readl(field) | mask);
29967 }
29968@@ -1606,7 +1606,11 @@ static void reload_tss(void)
29969 struct desc_struct *descs;
29970
29971 descs = (void *)gdt->address;
29972+
29973+ pax_open_kernel();
29974 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
29975+ pax_close_kernel();
29976+
29977 load_TR_desc();
29978 }
29979
29980@@ -1834,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
29981 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
29982 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
29983
29984+#ifdef CONFIG_PAX_PER_CPU_PGD
29985+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29986+#endif
29987+
29988 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
29989 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
29990 vmx->loaded_vmcs->cpu = cpu;
29991@@ -2123,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
29992 * reads and returns guest's timestamp counter "register"
29993 * guest_tsc = host_tsc + tsc_offset -- 21.3
29994 */
29995-static u64 guest_read_tsc(void)
29996+static u64 __intentional_overflow(-1) guest_read_tsc(void)
29997 {
29998 u64 host_tsc, tsc_offset;
29999
30000@@ -2632,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
30001 default:
30002 msr = find_msr_entry(vmx, msr_index);
30003 if (msr) {
30004+ u64 old_msr_data = msr->data;
30005 msr->data = data;
30006 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
30007 preempt_disable();
30008- kvm_set_shared_msr(msr->index, msr->data,
30009- msr->mask);
30010+ ret = kvm_set_shared_msr(msr->index, msr->data,
30011+ msr->mask);
30012 preempt_enable();
30013+ if (ret)
30014+ msr->data = old_msr_data;
30015 }
30016 break;
30017 }
30018@@ -3111,8 +3122,11 @@ static __init int hardware_setup(void)
30019 if (!cpu_has_vmx_flexpriority())
30020 flexpriority_enabled = 0;
30021
30022- if (!cpu_has_vmx_tpr_shadow())
30023- kvm_x86_ops->update_cr8_intercept = NULL;
30024+ if (!cpu_has_vmx_tpr_shadow()) {
30025+ pax_open_kernel();
30026+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
30027+ pax_close_kernel();
30028+ }
30029
30030 if (enable_ept && !cpu_has_vmx_ept_2m_page())
30031 kvm_disable_largepages();
30032@@ -3123,13 +3137,15 @@ static __init int hardware_setup(void)
30033 if (!cpu_has_vmx_apicv())
30034 enable_apicv = 0;
30035
30036+ pax_open_kernel();
30037 if (enable_apicv)
30038- kvm_x86_ops->update_cr8_intercept = NULL;
30039+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
30040 else {
30041- kvm_x86_ops->hwapic_irr_update = NULL;
30042- kvm_x86_ops->deliver_posted_interrupt = NULL;
30043- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
30044+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
30045+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
30046+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
30047 }
30048+ pax_close_kernel();
30049
30050 if (nested)
30051 nested_vmx_setup_ctls_msrs();
30052@@ -4239,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
30053 unsigned long cr4;
30054
30055 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
30056+
30057+#ifndef CONFIG_PAX_PER_CPU_PGD
30058 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
30059+#endif
30060
30061 /* Save the most likely value for this task's CR4 in the VMCS. */
30062 cr4 = read_cr4();
30063@@ -4266,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
30064 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
30065 vmx->host_idt_base = dt.address;
30066
30067- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
30068+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
30069
30070 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
30071 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
30072@@ -5263,7 +5282,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
30073 msr.data = data;
30074 msr.index = ecx;
30075 msr.host_initiated = false;
30076- if (vmx_set_msr(vcpu, &msr) != 0) {
30077+ if (kvm_set_msr(vcpu, &msr) != 0) {
30078 trace_kvm_msr_write_ex(ecx, data);
30079 kvm_inject_gp(vcpu, 0);
30080 return 1;
30081@@ -6636,6 +6655,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
30082 return 1;
30083 }
30084
30085+static int handle_invvpid(struct kvm_vcpu *vcpu)
30086+{
30087+ kvm_queue_exception(vcpu, UD_VECTOR);
30088+ return 1;
30089+}
30090+
30091 /*
30092 * The exit handlers return 1 if the exit was handled fully and guest execution
30093 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
30094@@ -6681,6 +6706,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
30095 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
30096 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
30097 [EXIT_REASON_INVEPT] = handle_invept,
30098+ [EXIT_REASON_INVVPID] = handle_invvpid,
30099 };
30100
30101 static const int kvm_vmx_max_exit_handlers =
30102@@ -6914,7 +6940,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
30103 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
30104 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
30105 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
30106- case EXIT_REASON_INVEPT:
30107+ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
30108 /*
30109 * VMX instructions trap unconditionally. This allows L1 to
30110 * emulate them for its L2 guest, i.e., allows 3-level nesting!
30111@@ -7055,10 +7081,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
30112 && kvm_vmx_exit_handlers[exit_reason])
30113 return kvm_vmx_exit_handlers[exit_reason](vcpu);
30114 else {
30115- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
30116- vcpu->run->hw.hardware_exit_reason = exit_reason;
30117+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
30118+ kvm_queue_exception(vcpu, UD_VECTOR);
30119+ return 1;
30120 }
30121- return 0;
30122 }
30123
30124 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
30125@@ -7465,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30126 "jmp 2f \n\t"
30127 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
30128 "2: "
30129+
30130+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30131+ "ljmp %[cs],$3f\n\t"
30132+ "3: "
30133+#endif
30134+
30135 /* Save guest registers, load host registers, keep flags */
30136 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
30137 "pop %0 \n\t"
30138@@ -7517,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30139 #endif
30140 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
30141 [wordsize]"i"(sizeof(ulong))
30142+
30143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30144+ ,[cs]"i"(__KERNEL_CS)
30145+#endif
30146+
30147 : "cc", "memory"
30148 #ifdef CONFIG_X86_64
30149 , "rax", "rbx", "rdi", "rsi"
30150@@ -7530,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30151 if (debugctlmsr)
30152 update_debugctlmsr(debugctlmsr);
30153
30154-#ifndef CONFIG_X86_64
30155+#ifdef CONFIG_X86_32
30156 /*
30157 * The sysexit path does not restore ds/es, so we must set them to
30158 * a reasonable value ourselves.
30159@@ -7539,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30160 * may be executed in interrupt context, which saves and restore segments
30161 * around it, nullifying its effect.
30162 */
30163- loadsegment(ds, __USER_DS);
30164- loadsegment(es, __USER_DS);
30165+ loadsegment(ds, __KERNEL_DS);
30166+ loadsegment(es, __KERNEL_DS);
30167+ loadsegment(ss, __KERNEL_DS);
30168+
30169+#ifdef CONFIG_PAX_KERNEXEC
30170+ loadsegment(fs, __KERNEL_PERCPU);
30171+#endif
30172+
30173+#ifdef CONFIG_PAX_MEMORY_UDEREF
30174+ __set_fs(current_thread_info()->addr_limit);
30175+#endif
30176+
30177 #endif
30178
30179 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
30180diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
30181index 8f1e22d..c23d3c5 100644
30182--- a/arch/x86/kvm/x86.c
30183+++ b/arch/x86/kvm/x86.c
30184@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
30185 shared_msr_update(i, shared_msrs_global.msrs[i]);
30186 }
30187
30188-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
30189+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
30190 {
30191 unsigned int cpu = smp_processor_id();
30192 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
30193+ int err;
30194
30195 if (((value ^ smsr->values[slot].curr) & mask) == 0)
30196- return;
30197+ return 0;
30198 smsr->values[slot].curr = value;
30199- wrmsrl(shared_msrs_global.msrs[slot], value);
30200+ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
30201+ if (err)
30202+ return 1;
30203+
30204 if (!smsr->registered) {
30205 smsr->urn.on_user_return = kvm_on_user_return;
30206 user_return_notifier_register(&smsr->urn);
30207 smsr->registered = true;
30208 }
30209+ return 0;
30210 }
30211 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
30212
30213@@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
30214 }
30215 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
30216
30217-
30218 /*
30219 * Writes msr value into into the appropriate "register".
30220 * Returns 0 on success, non-0 otherwise.
30221@@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
30222 */
30223 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
30224 {
30225+ switch (msr->index) {
30226+ case MSR_FS_BASE:
30227+ case MSR_GS_BASE:
30228+ case MSR_KERNEL_GS_BASE:
30229+ case MSR_CSTAR:
30230+ case MSR_LSTAR:
30231+ if (is_noncanonical_address(msr->data))
30232+ return 1;
30233+ break;
30234+ case MSR_IA32_SYSENTER_EIP:
30235+ case MSR_IA32_SYSENTER_ESP:
30236+ /*
30237+ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
30238+ * non-canonical address is written on Intel but not on
30239+ * AMD (which ignores the top 32-bits, because it does
30240+ * not implement 64-bit SYSENTER).
30241+ *
30242+ * 64-bit code should hence be able to write a non-canonical
30243+ * value on AMD. Making the address canonical ensures that
30244+ * vmentry does not fail on Intel after writing a non-canonical
30245+ * value, and that something deterministic happens if the guest
30246+ * invokes 64-bit SYSENTER.
30247+ */
30248+ msr->data = get_canonical(msr->data);
30249+ }
30250 return kvm_x86_ops->set_msr(vcpu, msr);
30251 }
30252+EXPORT_SYMBOL_GPL(kvm_set_msr);
30253
30254 /*
30255 * Adapt set_msr() to msr_io()'s calling convention
30256@@ -1827,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
30257 {
30258 struct kvm *kvm = vcpu->kvm;
30259 int lm = is_long_mode(vcpu);
30260- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
30261- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
30262+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
30263+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
30264 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
30265 : kvm->arch.xen_hvm_config.blob_size_32;
30266 u32 page_num = data & ~PAGE_MASK;
30267@@ -2749,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
30268 if (n < msr_list.nmsrs)
30269 goto out;
30270 r = -EFAULT;
30271+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
30272+ goto out;
30273 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
30274 num_msrs_to_save * sizeof(u32)))
30275 goto out;
30276@@ -5609,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
30277 };
30278 #endif
30279
30280-int kvm_arch_init(void *opaque)
30281+int kvm_arch_init(const void *opaque)
30282 {
30283 int r;
30284 struct kvm_x86_ops *ops = opaque;
30285diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
30286index aae9413..d11e829 100644
30287--- a/arch/x86/lguest/boot.c
30288+++ b/arch/x86/lguest/boot.c
30289@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
30290 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
30291 * Launcher to reboot us.
30292 */
30293-static void lguest_restart(char *reason)
30294+static __noreturn void lguest_restart(char *reason)
30295 {
30296 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
30297+ BUG();
30298 }
30299
30300 /*G:050
30301diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
30302index 00933d5..3a64af9 100644
30303--- a/arch/x86/lib/atomic64_386_32.S
30304+++ b/arch/x86/lib/atomic64_386_32.S
30305@@ -48,6 +48,10 @@ BEGIN(read)
30306 movl (v), %eax
30307 movl 4(v), %edx
30308 RET_ENDP
30309+BEGIN(read_unchecked)
30310+ movl (v), %eax
30311+ movl 4(v), %edx
30312+RET_ENDP
30313 #undef v
30314
30315 #define v %esi
30316@@ -55,6 +59,10 @@ BEGIN(set)
30317 movl %ebx, (v)
30318 movl %ecx, 4(v)
30319 RET_ENDP
30320+BEGIN(set_unchecked)
30321+ movl %ebx, (v)
30322+ movl %ecx, 4(v)
30323+RET_ENDP
30324 #undef v
30325
30326 #define v %esi
30327@@ -70,6 +78,20 @@ RET_ENDP
30328 BEGIN(add)
30329 addl %eax, (v)
30330 adcl %edx, 4(v)
30331+
30332+#ifdef CONFIG_PAX_REFCOUNT
30333+ jno 0f
30334+ subl %eax, (v)
30335+ sbbl %edx, 4(v)
30336+ int $4
30337+0:
30338+ _ASM_EXTABLE(0b, 0b)
30339+#endif
30340+
30341+RET_ENDP
30342+BEGIN(add_unchecked)
30343+ addl %eax, (v)
30344+ adcl %edx, 4(v)
30345 RET_ENDP
30346 #undef v
30347
30348@@ -77,6 +99,24 @@ RET_ENDP
30349 BEGIN(add_return)
30350 addl (v), %eax
30351 adcl 4(v), %edx
30352+
30353+#ifdef CONFIG_PAX_REFCOUNT
30354+ into
30355+1234:
30356+ _ASM_EXTABLE(1234b, 2f)
30357+#endif
30358+
30359+ movl %eax, (v)
30360+ movl %edx, 4(v)
30361+
30362+#ifdef CONFIG_PAX_REFCOUNT
30363+2:
30364+#endif
30365+
30366+RET_ENDP
30367+BEGIN(add_return_unchecked)
30368+ addl (v), %eax
30369+ adcl 4(v), %edx
30370 movl %eax, (v)
30371 movl %edx, 4(v)
30372 RET_ENDP
30373@@ -86,6 +126,20 @@ RET_ENDP
30374 BEGIN(sub)
30375 subl %eax, (v)
30376 sbbl %edx, 4(v)
30377+
30378+#ifdef CONFIG_PAX_REFCOUNT
30379+ jno 0f
30380+ addl %eax, (v)
30381+ adcl %edx, 4(v)
30382+ int $4
30383+0:
30384+ _ASM_EXTABLE(0b, 0b)
30385+#endif
30386+
30387+RET_ENDP
30388+BEGIN(sub_unchecked)
30389+ subl %eax, (v)
30390+ sbbl %edx, 4(v)
30391 RET_ENDP
30392 #undef v
30393
30394@@ -96,6 +150,27 @@ BEGIN(sub_return)
30395 sbbl $0, %edx
30396 addl (v), %eax
30397 adcl 4(v), %edx
30398+
30399+#ifdef CONFIG_PAX_REFCOUNT
30400+ into
30401+1234:
30402+ _ASM_EXTABLE(1234b, 2f)
30403+#endif
30404+
30405+ movl %eax, (v)
30406+ movl %edx, 4(v)
30407+
30408+#ifdef CONFIG_PAX_REFCOUNT
30409+2:
30410+#endif
30411+
30412+RET_ENDP
30413+BEGIN(sub_return_unchecked)
30414+ negl %edx
30415+ negl %eax
30416+ sbbl $0, %edx
30417+ addl (v), %eax
30418+ adcl 4(v), %edx
30419 movl %eax, (v)
30420 movl %edx, 4(v)
30421 RET_ENDP
30422@@ -105,6 +180,20 @@ RET_ENDP
30423 BEGIN(inc)
30424 addl $1, (v)
30425 adcl $0, 4(v)
30426+
30427+#ifdef CONFIG_PAX_REFCOUNT
30428+ jno 0f
30429+ subl $1, (v)
30430+ sbbl $0, 4(v)
30431+ int $4
30432+0:
30433+ _ASM_EXTABLE(0b, 0b)
30434+#endif
30435+
30436+RET_ENDP
30437+BEGIN(inc_unchecked)
30438+ addl $1, (v)
30439+ adcl $0, 4(v)
30440 RET_ENDP
30441 #undef v
30442
30443@@ -114,6 +203,26 @@ BEGIN(inc_return)
30444 movl 4(v), %edx
30445 addl $1, %eax
30446 adcl $0, %edx
30447+
30448+#ifdef CONFIG_PAX_REFCOUNT
30449+ into
30450+1234:
30451+ _ASM_EXTABLE(1234b, 2f)
30452+#endif
30453+
30454+ movl %eax, (v)
30455+ movl %edx, 4(v)
30456+
30457+#ifdef CONFIG_PAX_REFCOUNT
30458+2:
30459+#endif
30460+
30461+RET_ENDP
30462+BEGIN(inc_return_unchecked)
30463+ movl (v), %eax
30464+ movl 4(v), %edx
30465+ addl $1, %eax
30466+ adcl $0, %edx
30467 movl %eax, (v)
30468 movl %edx, 4(v)
30469 RET_ENDP
30470@@ -123,6 +232,20 @@ RET_ENDP
30471 BEGIN(dec)
30472 subl $1, (v)
30473 sbbl $0, 4(v)
30474+
30475+#ifdef CONFIG_PAX_REFCOUNT
30476+ jno 0f
30477+ addl $1, (v)
30478+ adcl $0, 4(v)
30479+ int $4
30480+0:
30481+ _ASM_EXTABLE(0b, 0b)
30482+#endif
30483+
30484+RET_ENDP
30485+BEGIN(dec_unchecked)
30486+ subl $1, (v)
30487+ sbbl $0, 4(v)
30488 RET_ENDP
30489 #undef v
30490
30491@@ -132,6 +255,26 @@ BEGIN(dec_return)
30492 movl 4(v), %edx
30493 subl $1, %eax
30494 sbbl $0, %edx
30495+
30496+#ifdef CONFIG_PAX_REFCOUNT
30497+ into
30498+1234:
30499+ _ASM_EXTABLE(1234b, 2f)
30500+#endif
30501+
30502+ movl %eax, (v)
30503+ movl %edx, 4(v)
30504+
30505+#ifdef CONFIG_PAX_REFCOUNT
30506+2:
30507+#endif
30508+
30509+RET_ENDP
30510+BEGIN(dec_return_unchecked)
30511+ movl (v), %eax
30512+ movl 4(v), %edx
30513+ subl $1, %eax
30514+ sbbl $0, %edx
30515 movl %eax, (v)
30516 movl %edx, 4(v)
30517 RET_ENDP
30518@@ -143,6 +286,13 @@ BEGIN(add_unless)
30519 adcl %edx, %edi
30520 addl (v), %eax
30521 adcl 4(v), %edx
30522+
30523+#ifdef CONFIG_PAX_REFCOUNT
30524+ into
30525+1234:
30526+ _ASM_EXTABLE(1234b, 2f)
30527+#endif
30528+
30529 cmpl %eax, %ecx
30530 je 3f
30531 1:
30532@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
30533 1:
30534 addl $1, %eax
30535 adcl $0, %edx
30536+
30537+#ifdef CONFIG_PAX_REFCOUNT
30538+ into
30539+1234:
30540+ _ASM_EXTABLE(1234b, 2f)
30541+#endif
30542+
30543 movl %eax, (v)
30544 movl %edx, 4(v)
30545 movl $1, %eax
30546@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
30547 movl 4(v), %edx
30548 subl $1, %eax
30549 sbbl $0, %edx
30550+
30551+#ifdef CONFIG_PAX_REFCOUNT
30552+ into
30553+1234:
30554+ _ASM_EXTABLE(1234b, 1f)
30555+#endif
30556+
30557 js 1f
30558 movl %eax, (v)
30559 movl %edx, 4(v)
30560diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
30561index f5cc9eb..51fa319 100644
30562--- a/arch/x86/lib/atomic64_cx8_32.S
30563+++ b/arch/x86/lib/atomic64_cx8_32.S
30564@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
30565 CFI_STARTPROC
30566
30567 read64 %ecx
30568+ pax_force_retaddr
30569 ret
30570 CFI_ENDPROC
30571 ENDPROC(atomic64_read_cx8)
30572
30573+ENTRY(atomic64_read_unchecked_cx8)
30574+ CFI_STARTPROC
30575+
30576+ read64 %ecx
30577+ pax_force_retaddr
30578+ ret
30579+ CFI_ENDPROC
30580+ENDPROC(atomic64_read_unchecked_cx8)
30581+
30582 ENTRY(atomic64_set_cx8)
30583 CFI_STARTPROC
30584
30585@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
30586 cmpxchg8b (%esi)
30587 jne 1b
30588
30589+ pax_force_retaddr
30590 ret
30591 CFI_ENDPROC
30592 ENDPROC(atomic64_set_cx8)
30593
30594+ENTRY(atomic64_set_unchecked_cx8)
30595+ CFI_STARTPROC
30596+
30597+1:
30598+/* we don't need LOCK_PREFIX since aligned 64-bit writes
30599+ * are atomic on 586 and newer */
30600+ cmpxchg8b (%esi)
30601+ jne 1b
30602+
30603+ pax_force_retaddr
30604+ ret
30605+ CFI_ENDPROC
30606+ENDPROC(atomic64_set_unchecked_cx8)
30607+
30608 ENTRY(atomic64_xchg_cx8)
30609 CFI_STARTPROC
30610
30611@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
30612 cmpxchg8b (%esi)
30613 jne 1b
30614
30615+ pax_force_retaddr
30616 ret
30617 CFI_ENDPROC
30618 ENDPROC(atomic64_xchg_cx8)
30619
30620-.macro addsub_return func ins insc
30621-ENTRY(atomic64_\func\()_return_cx8)
30622+.macro addsub_return func ins insc unchecked=""
30623+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
30624 CFI_STARTPROC
30625 SAVE ebp
30626 SAVE ebx
30627@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
30628 movl %edx, %ecx
30629 \ins\()l %esi, %ebx
30630 \insc\()l %edi, %ecx
30631+
30632+.ifb \unchecked
30633+#ifdef CONFIG_PAX_REFCOUNT
30634+ into
30635+2:
30636+ _ASM_EXTABLE(2b, 3f)
30637+#endif
30638+.endif
30639+
30640 LOCK_PREFIX
30641 cmpxchg8b (%ebp)
30642 jne 1b
30643-
30644-10:
30645 movl %ebx, %eax
30646 movl %ecx, %edx
30647+
30648+.ifb \unchecked
30649+#ifdef CONFIG_PAX_REFCOUNT
30650+3:
30651+#endif
30652+.endif
30653+
30654 RESTORE edi
30655 RESTORE esi
30656 RESTORE ebx
30657 RESTORE ebp
30658+ pax_force_retaddr
30659 ret
30660 CFI_ENDPROC
30661-ENDPROC(atomic64_\func\()_return_cx8)
30662+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
30663 .endm
30664
30665 addsub_return add add adc
30666 addsub_return sub sub sbb
30667+addsub_return add add adc _unchecked
30668+addsub_return sub sub sbb _unchecked
30669
30670-.macro incdec_return func ins insc
30671-ENTRY(atomic64_\func\()_return_cx8)
30672+.macro incdec_return func ins insc unchecked=""
30673+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
30674 CFI_STARTPROC
30675 SAVE ebx
30676
30677@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
30678 movl %edx, %ecx
30679 \ins\()l $1, %ebx
30680 \insc\()l $0, %ecx
30681+
30682+.ifb \unchecked
30683+#ifdef CONFIG_PAX_REFCOUNT
30684+ into
30685+2:
30686+ _ASM_EXTABLE(2b, 3f)
30687+#endif
30688+.endif
30689+
30690 LOCK_PREFIX
30691 cmpxchg8b (%esi)
30692 jne 1b
30693
30694-10:
30695 movl %ebx, %eax
30696 movl %ecx, %edx
30697+
30698+.ifb \unchecked
30699+#ifdef CONFIG_PAX_REFCOUNT
30700+3:
30701+#endif
30702+.endif
30703+
30704 RESTORE ebx
30705+ pax_force_retaddr
30706 ret
30707 CFI_ENDPROC
30708-ENDPROC(atomic64_\func\()_return_cx8)
30709+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
30710 .endm
30711
30712 incdec_return inc add adc
30713 incdec_return dec sub sbb
30714+incdec_return inc add adc _unchecked
30715+incdec_return dec sub sbb _unchecked
30716
30717 ENTRY(atomic64_dec_if_positive_cx8)
30718 CFI_STARTPROC
30719@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
30720 movl %edx, %ecx
30721 subl $1, %ebx
30722 sbb $0, %ecx
30723+
30724+#ifdef CONFIG_PAX_REFCOUNT
30725+ into
30726+1234:
30727+ _ASM_EXTABLE(1234b, 2f)
30728+#endif
30729+
30730 js 2f
30731 LOCK_PREFIX
30732 cmpxchg8b (%esi)
30733@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
30734 movl %ebx, %eax
30735 movl %ecx, %edx
30736 RESTORE ebx
30737+ pax_force_retaddr
30738 ret
30739 CFI_ENDPROC
30740 ENDPROC(atomic64_dec_if_positive_cx8)
30741@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
30742 movl %edx, %ecx
30743 addl %ebp, %ebx
30744 adcl %edi, %ecx
30745+
30746+#ifdef CONFIG_PAX_REFCOUNT
30747+ into
30748+1234:
30749+ _ASM_EXTABLE(1234b, 3f)
30750+#endif
30751+
30752 LOCK_PREFIX
30753 cmpxchg8b (%esi)
30754 jne 1b
30755@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
30756 CFI_ADJUST_CFA_OFFSET -8
30757 RESTORE ebx
30758 RESTORE ebp
30759+ pax_force_retaddr
30760 ret
30761 4:
30762 cmpl %edx, 4(%esp)
30763@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
30764 xorl %ecx, %ecx
30765 addl $1, %ebx
30766 adcl %edx, %ecx
30767+
30768+#ifdef CONFIG_PAX_REFCOUNT
30769+ into
30770+1234:
30771+ _ASM_EXTABLE(1234b, 3f)
30772+#endif
30773+
30774 LOCK_PREFIX
30775 cmpxchg8b (%esi)
30776 jne 1b
30777@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
30778 movl $1, %eax
30779 3:
30780 RESTORE ebx
30781+ pax_force_retaddr
30782 ret
30783 CFI_ENDPROC
30784 ENDPROC(atomic64_inc_not_zero_cx8)
30785diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
30786index e78b8eee..7e173a8 100644
30787--- a/arch/x86/lib/checksum_32.S
30788+++ b/arch/x86/lib/checksum_32.S
30789@@ -29,7 +29,8 @@
30790 #include <asm/dwarf2.h>
30791 #include <asm/errno.h>
30792 #include <asm/asm.h>
30793-
30794+#include <asm/segment.h>
30795+
30796 /*
30797 * computes a partial checksum, e.g. for TCP/UDP fragments
30798 */
30799@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
30800
30801 #define ARGBASE 16
30802 #define FP 12
30803-
30804-ENTRY(csum_partial_copy_generic)
30805+
30806+ENTRY(csum_partial_copy_generic_to_user)
30807 CFI_STARTPROC
30808+
30809+#ifdef CONFIG_PAX_MEMORY_UDEREF
30810+ pushl_cfi %gs
30811+ popl_cfi %es
30812+ jmp csum_partial_copy_generic
30813+#endif
30814+
30815+ENTRY(csum_partial_copy_generic_from_user)
30816+
30817+#ifdef CONFIG_PAX_MEMORY_UDEREF
30818+ pushl_cfi %gs
30819+ popl_cfi %ds
30820+#endif
30821+
30822+ENTRY(csum_partial_copy_generic)
30823 subl $4,%esp
30824 CFI_ADJUST_CFA_OFFSET 4
30825 pushl_cfi %edi
30826@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
30827 jmp 4f
30828 SRC(1: movw (%esi), %bx )
30829 addl $2, %esi
30830-DST( movw %bx, (%edi) )
30831+DST( movw %bx, %es:(%edi) )
30832 addl $2, %edi
30833 addw %bx, %ax
30834 adcl $0, %eax
30835@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
30836 SRC(1: movl (%esi), %ebx )
30837 SRC( movl 4(%esi), %edx )
30838 adcl %ebx, %eax
30839-DST( movl %ebx, (%edi) )
30840+DST( movl %ebx, %es:(%edi) )
30841 adcl %edx, %eax
30842-DST( movl %edx, 4(%edi) )
30843+DST( movl %edx, %es:4(%edi) )
30844
30845 SRC( movl 8(%esi), %ebx )
30846 SRC( movl 12(%esi), %edx )
30847 adcl %ebx, %eax
30848-DST( movl %ebx, 8(%edi) )
30849+DST( movl %ebx, %es:8(%edi) )
30850 adcl %edx, %eax
30851-DST( movl %edx, 12(%edi) )
30852+DST( movl %edx, %es:12(%edi) )
30853
30854 SRC( movl 16(%esi), %ebx )
30855 SRC( movl 20(%esi), %edx )
30856 adcl %ebx, %eax
30857-DST( movl %ebx, 16(%edi) )
30858+DST( movl %ebx, %es:16(%edi) )
30859 adcl %edx, %eax
30860-DST( movl %edx, 20(%edi) )
30861+DST( movl %edx, %es:20(%edi) )
30862
30863 SRC( movl 24(%esi), %ebx )
30864 SRC( movl 28(%esi), %edx )
30865 adcl %ebx, %eax
30866-DST( movl %ebx, 24(%edi) )
30867+DST( movl %ebx, %es:24(%edi) )
30868 adcl %edx, %eax
30869-DST( movl %edx, 28(%edi) )
30870+DST( movl %edx, %es:28(%edi) )
30871
30872 lea 32(%esi), %esi
30873 lea 32(%edi), %edi
30874@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
30875 shrl $2, %edx # This clears CF
30876 SRC(3: movl (%esi), %ebx )
30877 adcl %ebx, %eax
30878-DST( movl %ebx, (%edi) )
30879+DST( movl %ebx, %es:(%edi) )
30880 lea 4(%esi), %esi
30881 lea 4(%edi), %edi
30882 dec %edx
30883@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
30884 jb 5f
30885 SRC( movw (%esi), %cx )
30886 leal 2(%esi), %esi
30887-DST( movw %cx, (%edi) )
30888+DST( movw %cx, %es:(%edi) )
30889 leal 2(%edi), %edi
30890 je 6f
30891 shll $16,%ecx
30892 SRC(5: movb (%esi), %cl )
30893-DST( movb %cl, (%edi) )
30894+DST( movb %cl, %es:(%edi) )
30895 6: addl %ecx, %eax
30896 adcl $0, %eax
30897 7:
30898@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
30899
30900 6001:
30901 movl ARGBASE+20(%esp), %ebx # src_err_ptr
30902- movl $-EFAULT, (%ebx)
30903+ movl $-EFAULT, %ss:(%ebx)
30904
30905 # zero the complete destination - computing the rest
30906 # is too much work
30907@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
30908
30909 6002:
30910 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30911- movl $-EFAULT,(%ebx)
30912+ movl $-EFAULT,%ss:(%ebx)
30913 jmp 5000b
30914
30915 .previous
30916
30917+ pushl_cfi %ss
30918+ popl_cfi %ds
30919+ pushl_cfi %ss
30920+ popl_cfi %es
30921 popl_cfi %ebx
30922 CFI_RESTORE ebx
30923 popl_cfi %esi
30924@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
30925 popl_cfi %ecx # equivalent to addl $4,%esp
30926 ret
30927 CFI_ENDPROC
30928-ENDPROC(csum_partial_copy_generic)
30929+ENDPROC(csum_partial_copy_generic_to_user)
30930
30931 #else
30932
30933 /* Version for PentiumII/PPro */
30934
30935 #define ROUND1(x) \
30936+ nop; nop; nop; \
30937 SRC(movl x(%esi), %ebx ) ; \
30938 addl %ebx, %eax ; \
30939- DST(movl %ebx, x(%edi) ) ;
30940+ DST(movl %ebx, %es:x(%edi)) ;
30941
30942 #define ROUND(x) \
30943+ nop; nop; nop; \
30944 SRC(movl x(%esi), %ebx ) ; \
30945 adcl %ebx, %eax ; \
30946- DST(movl %ebx, x(%edi) ) ;
30947+ DST(movl %ebx, %es:x(%edi)) ;
30948
30949 #define ARGBASE 12
30950-
30951-ENTRY(csum_partial_copy_generic)
30952+
30953+ENTRY(csum_partial_copy_generic_to_user)
30954 CFI_STARTPROC
30955+
30956+#ifdef CONFIG_PAX_MEMORY_UDEREF
30957+ pushl_cfi %gs
30958+ popl_cfi %es
30959+ jmp csum_partial_copy_generic
30960+#endif
30961+
30962+ENTRY(csum_partial_copy_generic_from_user)
30963+
30964+#ifdef CONFIG_PAX_MEMORY_UDEREF
30965+ pushl_cfi %gs
30966+ popl_cfi %ds
30967+#endif
30968+
30969+ENTRY(csum_partial_copy_generic)
30970 pushl_cfi %ebx
30971 CFI_REL_OFFSET ebx, 0
30972 pushl_cfi %edi
30973@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
30974 subl %ebx, %edi
30975 lea -1(%esi),%edx
30976 andl $-32,%edx
30977- lea 3f(%ebx,%ebx), %ebx
30978+ lea 3f(%ebx,%ebx,2), %ebx
30979 testl %esi, %esi
30980 jmp *%ebx
30981 1: addl $64,%esi
30982@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
30983 jb 5f
30984 SRC( movw (%esi), %dx )
30985 leal 2(%esi), %esi
30986-DST( movw %dx, (%edi) )
30987+DST( movw %dx, %es:(%edi) )
30988 leal 2(%edi), %edi
30989 je 6f
30990 shll $16,%edx
30991 5:
30992 SRC( movb (%esi), %dl )
30993-DST( movb %dl, (%edi) )
30994+DST( movb %dl, %es:(%edi) )
30995 6: addl %edx, %eax
30996 adcl $0, %eax
30997 7:
30998 .section .fixup, "ax"
30999 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
31000- movl $-EFAULT, (%ebx)
31001+ movl $-EFAULT, %ss:(%ebx)
31002 # zero the complete destination (computing the rest is too much work)
31003 movl ARGBASE+8(%esp),%edi # dst
31004 movl ARGBASE+12(%esp),%ecx # len
31005@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
31006 rep; stosb
31007 jmp 7b
31008 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
31009- movl $-EFAULT, (%ebx)
31010+ movl $-EFAULT, %ss:(%ebx)
31011 jmp 7b
31012 .previous
31013
31014+#ifdef CONFIG_PAX_MEMORY_UDEREF
31015+ pushl_cfi %ss
31016+ popl_cfi %ds
31017+ pushl_cfi %ss
31018+ popl_cfi %es
31019+#endif
31020+
31021 popl_cfi %esi
31022 CFI_RESTORE esi
31023 popl_cfi %edi
31024@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
31025 CFI_RESTORE ebx
31026 ret
31027 CFI_ENDPROC
31028-ENDPROC(csum_partial_copy_generic)
31029+ENDPROC(csum_partial_copy_generic_to_user)
31030
31031 #undef ROUND
31032 #undef ROUND1
31033diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
31034index f2145cf..cea889d 100644
31035--- a/arch/x86/lib/clear_page_64.S
31036+++ b/arch/x86/lib/clear_page_64.S
31037@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
31038 movl $4096/8,%ecx
31039 xorl %eax,%eax
31040 rep stosq
31041+ pax_force_retaddr
31042 ret
31043 CFI_ENDPROC
31044 ENDPROC(clear_page_c)
31045@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
31046 movl $4096,%ecx
31047 xorl %eax,%eax
31048 rep stosb
31049+ pax_force_retaddr
31050 ret
31051 CFI_ENDPROC
31052 ENDPROC(clear_page_c_e)
31053@@ -43,6 +45,7 @@ ENTRY(clear_page)
31054 leaq 64(%rdi),%rdi
31055 jnz .Lloop
31056 nop
31057+ pax_force_retaddr
31058 ret
31059 CFI_ENDPROC
31060 .Lclear_page_end:
31061@@ -58,7 +61,7 @@ ENDPROC(clear_page)
31062
31063 #include <asm/cpufeature.h>
31064
31065- .section .altinstr_replacement,"ax"
31066+ .section .altinstr_replacement,"a"
31067 1: .byte 0xeb /* jmp <disp8> */
31068 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
31069 2: .byte 0xeb /* jmp <disp8> */
31070diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
31071index 1e572c5..2a162cd 100644
31072--- a/arch/x86/lib/cmpxchg16b_emu.S
31073+++ b/arch/x86/lib/cmpxchg16b_emu.S
31074@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
31075
31076 popf
31077 mov $1, %al
31078+ pax_force_retaddr
31079 ret
31080
31081 not_same:
31082 popf
31083 xor %al,%al
31084+ pax_force_retaddr
31085 ret
31086
31087 CFI_ENDPROC
31088diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
31089index 176cca6..e0d658e 100644
31090--- a/arch/x86/lib/copy_page_64.S
31091+++ b/arch/x86/lib/copy_page_64.S
31092@@ -9,6 +9,7 @@ copy_page_rep:
31093 CFI_STARTPROC
31094 movl $4096/8, %ecx
31095 rep movsq
31096+ pax_force_retaddr
31097 ret
31098 CFI_ENDPROC
31099 ENDPROC(copy_page_rep)
31100@@ -24,8 +25,8 @@ ENTRY(copy_page)
31101 CFI_ADJUST_CFA_OFFSET 2*8
31102 movq %rbx, (%rsp)
31103 CFI_REL_OFFSET rbx, 0
31104- movq %r12, 1*8(%rsp)
31105- CFI_REL_OFFSET r12, 1*8
31106+ movq %r13, 1*8(%rsp)
31107+ CFI_REL_OFFSET r13, 1*8
31108
31109 movl $(4096/64)-5, %ecx
31110 .p2align 4
31111@@ -38,7 +39,7 @@ ENTRY(copy_page)
31112 movq 0x8*4(%rsi), %r9
31113 movq 0x8*5(%rsi), %r10
31114 movq 0x8*6(%rsi), %r11
31115- movq 0x8*7(%rsi), %r12
31116+ movq 0x8*7(%rsi), %r13
31117
31118 prefetcht0 5*64(%rsi)
31119
31120@@ -49,7 +50,7 @@ ENTRY(copy_page)
31121 movq %r9, 0x8*4(%rdi)
31122 movq %r10, 0x8*5(%rdi)
31123 movq %r11, 0x8*6(%rdi)
31124- movq %r12, 0x8*7(%rdi)
31125+ movq %r13, 0x8*7(%rdi)
31126
31127 leaq 64 (%rsi), %rsi
31128 leaq 64 (%rdi), %rdi
31129@@ -68,7 +69,7 @@ ENTRY(copy_page)
31130 movq 0x8*4(%rsi), %r9
31131 movq 0x8*5(%rsi), %r10
31132 movq 0x8*6(%rsi), %r11
31133- movq 0x8*7(%rsi), %r12
31134+ movq 0x8*7(%rsi), %r13
31135
31136 movq %rax, 0x8*0(%rdi)
31137 movq %rbx, 0x8*1(%rdi)
31138@@ -77,7 +78,7 @@ ENTRY(copy_page)
31139 movq %r9, 0x8*4(%rdi)
31140 movq %r10, 0x8*5(%rdi)
31141 movq %r11, 0x8*6(%rdi)
31142- movq %r12, 0x8*7(%rdi)
31143+ movq %r13, 0x8*7(%rdi)
31144
31145 leaq 64(%rdi), %rdi
31146 leaq 64(%rsi), %rsi
31147@@ -85,10 +86,11 @@ ENTRY(copy_page)
31148
31149 movq (%rsp), %rbx
31150 CFI_RESTORE rbx
31151- movq 1*8(%rsp), %r12
31152- CFI_RESTORE r12
31153+ movq 1*8(%rsp), %r13
31154+ CFI_RESTORE r13
31155 addq $2*8, %rsp
31156 CFI_ADJUST_CFA_OFFSET -2*8
31157+ pax_force_retaddr
31158 ret
31159 .Lcopy_page_end:
31160 CFI_ENDPROC
31161@@ -99,7 +101,7 @@ ENDPROC(copy_page)
31162
31163 #include <asm/cpufeature.h>
31164
31165- .section .altinstr_replacement,"ax"
31166+ .section .altinstr_replacement,"a"
31167 1: .byte 0xeb /* jmp <disp8> */
31168 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
31169 2:
31170diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
31171index dee945d..a84067b 100644
31172--- a/arch/x86/lib/copy_user_64.S
31173+++ b/arch/x86/lib/copy_user_64.S
31174@@ -18,31 +18,7 @@
31175 #include <asm/alternative-asm.h>
31176 #include <asm/asm.h>
31177 #include <asm/smap.h>
31178-
31179-/*
31180- * By placing feature2 after feature1 in altinstructions section, we logically
31181- * implement:
31182- * If CPU has feature2, jmp to alt2 is used
31183- * else if CPU has feature1, jmp to alt1 is used
31184- * else jmp to orig is used.
31185- */
31186- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
31187-0:
31188- .byte 0xe9 /* 32bit jump */
31189- .long \orig-1f /* by default jump to orig */
31190-1:
31191- .section .altinstr_replacement,"ax"
31192-2: .byte 0xe9 /* near jump with 32bit immediate */
31193- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
31194-3: .byte 0xe9 /* near jump with 32bit immediate */
31195- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
31196- .previous
31197-
31198- .section .altinstructions,"a"
31199- altinstruction_entry 0b,2b,\feature1,5,5
31200- altinstruction_entry 0b,3b,\feature2,5,5
31201- .previous
31202- .endm
31203+#include <asm/pgtable.h>
31204
31205 .macro ALIGN_DESTINATION
31206 #ifdef FIX_ALIGNMENT
31207@@ -70,52 +46,6 @@
31208 #endif
31209 .endm
31210
31211-/* Standard copy_to_user with segment limit checking */
31212-ENTRY(_copy_to_user)
31213- CFI_STARTPROC
31214- GET_THREAD_INFO(%rax)
31215- movq %rdi,%rcx
31216- addq %rdx,%rcx
31217- jc bad_to_user
31218- cmpq TI_addr_limit(%rax),%rcx
31219- ja bad_to_user
31220- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
31221- copy_user_generic_unrolled,copy_user_generic_string, \
31222- copy_user_enhanced_fast_string
31223- CFI_ENDPROC
31224-ENDPROC(_copy_to_user)
31225-
31226-/* Standard copy_from_user with segment limit checking */
31227-ENTRY(_copy_from_user)
31228- CFI_STARTPROC
31229- GET_THREAD_INFO(%rax)
31230- movq %rsi,%rcx
31231- addq %rdx,%rcx
31232- jc bad_from_user
31233- cmpq TI_addr_limit(%rax),%rcx
31234- ja bad_from_user
31235- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
31236- copy_user_generic_unrolled,copy_user_generic_string, \
31237- copy_user_enhanced_fast_string
31238- CFI_ENDPROC
31239-ENDPROC(_copy_from_user)
31240-
31241- .section .fixup,"ax"
31242- /* must zero dest */
31243-ENTRY(bad_from_user)
31244-bad_from_user:
31245- CFI_STARTPROC
31246- movl %edx,%ecx
31247- xorl %eax,%eax
31248- rep
31249- stosb
31250-bad_to_user:
31251- movl %edx,%eax
31252- ret
31253- CFI_ENDPROC
31254-ENDPROC(bad_from_user)
31255- .previous
31256-
31257 /*
31258 * copy_user_generic_unrolled - memory copy with exception handling.
31259 * This version is for CPUs like P4 that don't have efficient micro
31260@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
31261 */
31262 ENTRY(copy_user_generic_unrolled)
31263 CFI_STARTPROC
31264+ ASM_PAX_OPEN_USERLAND
31265 ASM_STAC
31266 cmpl $8,%edx
31267 jb 20f /* less then 8 bytes, go to byte copy loop */
31268@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
31269 jnz 21b
31270 23: xor %eax,%eax
31271 ASM_CLAC
31272+ ASM_PAX_CLOSE_USERLAND
31273+ pax_force_retaddr
31274 ret
31275
31276 .section .fixup,"ax"
31277@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
31278 */
31279 ENTRY(copy_user_generic_string)
31280 CFI_STARTPROC
31281+ ASM_PAX_OPEN_USERLAND
31282 ASM_STAC
31283 cmpl $8,%edx
31284 jb 2f /* less than 8 bytes, go to byte copy loop */
31285@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
31286 movsb
31287 xorl %eax,%eax
31288 ASM_CLAC
31289+ ASM_PAX_CLOSE_USERLAND
31290+ pax_force_retaddr
31291 ret
31292
31293 .section .fixup,"ax"
31294@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
31295 */
31296 ENTRY(copy_user_enhanced_fast_string)
31297 CFI_STARTPROC
31298+ ASM_PAX_OPEN_USERLAND
31299 ASM_STAC
31300 movl %edx,%ecx
31301 1: rep
31302 movsb
31303 xorl %eax,%eax
31304 ASM_CLAC
31305+ ASM_PAX_CLOSE_USERLAND
31306+ pax_force_retaddr
31307 ret
31308
31309 .section .fixup,"ax"
31310diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
31311index 6a4f43c..c70fb52 100644
31312--- a/arch/x86/lib/copy_user_nocache_64.S
31313+++ b/arch/x86/lib/copy_user_nocache_64.S
31314@@ -8,6 +8,7 @@
31315
31316 #include <linux/linkage.h>
31317 #include <asm/dwarf2.h>
31318+#include <asm/alternative-asm.h>
31319
31320 #define FIX_ALIGNMENT 1
31321
31322@@ -16,6 +17,7 @@
31323 #include <asm/thread_info.h>
31324 #include <asm/asm.h>
31325 #include <asm/smap.h>
31326+#include <asm/pgtable.h>
31327
31328 .macro ALIGN_DESTINATION
31329 #ifdef FIX_ALIGNMENT
31330@@ -49,6 +51,16 @@
31331 */
31332 ENTRY(__copy_user_nocache)
31333 CFI_STARTPROC
31334+
31335+#ifdef CONFIG_PAX_MEMORY_UDEREF
31336+ mov pax_user_shadow_base,%rcx
31337+ cmp %rcx,%rsi
31338+ jae 1f
31339+ add %rcx,%rsi
31340+1:
31341+#endif
31342+
31343+ ASM_PAX_OPEN_USERLAND
31344 ASM_STAC
31345 cmpl $8,%edx
31346 jb 20f /* less then 8 bytes, go to byte copy loop */
31347@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
31348 jnz 21b
31349 23: xorl %eax,%eax
31350 ASM_CLAC
31351+ ASM_PAX_CLOSE_USERLAND
31352 sfence
31353+ pax_force_retaddr
31354 ret
31355
31356 .section .fixup,"ax"
31357diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
31358index 2419d5f..fe52d0e 100644
31359--- a/arch/x86/lib/csum-copy_64.S
31360+++ b/arch/x86/lib/csum-copy_64.S
31361@@ -9,6 +9,7 @@
31362 #include <asm/dwarf2.h>
31363 #include <asm/errno.h>
31364 #include <asm/asm.h>
31365+#include <asm/alternative-asm.h>
31366
31367 /*
31368 * Checksum copy with exception handling.
31369@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
31370 CFI_ADJUST_CFA_OFFSET 7*8
31371 movq %rbx, 2*8(%rsp)
31372 CFI_REL_OFFSET rbx, 2*8
31373- movq %r12, 3*8(%rsp)
31374- CFI_REL_OFFSET r12, 3*8
31375+ movq %r15, 3*8(%rsp)
31376+ CFI_REL_OFFSET r15, 3*8
31377 movq %r14, 4*8(%rsp)
31378 CFI_REL_OFFSET r14, 4*8
31379 movq %r13, 5*8(%rsp)
31380@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
31381 movl %edx, %ecx
31382
31383 xorl %r9d, %r9d
31384- movq %rcx, %r12
31385+ movq %rcx, %r15
31386
31387- shrq $6, %r12
31388+ shrq $6, %r15
31389 jz .Lhandle_tail /* < 64 */
31390
31391 clc
31392
31393 /* main loop. clear in 64 byte blocks */
31394 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
31395- /* r11: temp3, rdx: temp4, r12 loopcnt */
31396+ /* r11: temp3, rdx: temp4, r15 loopcnt */
31397 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
31398 .p2align 4
31399 .Lloop:
31400@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
31401 adcq %r14, %rax
31402 adcq %r13, %rax
31403
31404- decl %r12d
31405+ decl %r15d
31406
31407 dest
31408 movq %rbx, (%rsi)
31409@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
31410 .Lende:
31411 movq 2*8(%rsp), %rbx
31412 CFI_RESTORE rbx
31413- movq 3*8(%rsp), %r12
31414- CFI_RESTORE r12
31415+ movq 3*8(%rsp), %r15
31416+ CFI_RESTORE r15
31417 movq 4*8(%rsp), %r14
31418 CFI_RESTORE r14
31419 movq 5*8(%rsp), %r13
31420@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
31421 CFI_RESTORE rbp
31422 addq $7*8, %rsp
31423 CFI_ADJUST_CFA_OFFSET -7*8
31424+ pax_force_retaddr
31425 ret
31426 CFI_RESTORE_STATE
31427
31428diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
31429index 7609e0e..b449b98 100644
31430--- a/arch/x86/lib/csum-wrappers_64.c
31431+++ b/arch/x86/lib/csum-wrappers_64.c
31432@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
31433 len -= 2;
31434 }
31435 }
31436+ pax_open_userland();
31437 stac();
31438- isum = csum_partial_copy_generic((__force const void *)src,
31439+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
31440 dst, len, isum, errp, NULL);
31441 clac();
31442+ pax_close_userland();
31443 if (unlikely(*errp))
31444 goto out_err;
31445
31446@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
31447 }
31448
31449 *errp = 0;
31450+ pax_open_userland();
31451 stac();
31452- ret = csum_partial_copy_generic(src, (void __force *)dst,
31453+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
31454 len, isum, NULL, errp);
31455 clac();
31456+ pax_close_userland();
31457 return ret;
31458 }
31459 EXPORT_SYMBOL(csum_partial_copy_to_user);
31460diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
31461index a451235..1daa956 100644
31462--- a/arch/x86/lib/getuser.S
31463+++ b/arch/x86/lib/getuser.S
31464@@ -33,17 +33,40 @@
31465 #include <asm/thread_info.h>
31466 #include <asm/asm.h>
31467 #include <asm/smap.h>
31468+#include <asm/segment.h>
31469+#include <asm/pgtable.h>
31470+#include <asm/alternative-asm.h>
31471+
31472+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
31473+#define __copyuser_seg gs;
31474+#else
31475+#define __copyuser_seg
31476+#endif
31477
31478 .text
31479 ENTRY(__get_user_1)
31480 CFI_STARTPROC
31481+
31482+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31483 GET_THREAD_INFO(%_ASM_DX)
31484 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31485 jae bad_get_user
31486 ASM_STAC
31487-1: movzbl (%_ASM_AX),%edx
31488+
31489+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31490+ mov pax_user_shadow_base,%_ASM_DX
31491+ cmp %_ASM_DX,%_ASM_AX
31492+ jae 1234f
31493+ add %_ASM_DX,%_ASM_AX
31494+1234:
31495+#endif
31496+
31497+#endif
31498+
31499+1: __copyuser_seg movzbl (%_ASM_AX),%edx
31500 xor %eax,%eax
31501 ASM_CLAC
31502+ pax_force_retaddr
31503 ret
31504 CFI_ENDPROC
31505 ENDPROC(__get_user_1)
31506@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
31507 ENTRY(__get_user_2)
31508 CFI_STARTPROC
31509 add $1,%_ASM_AX
31510+
31511+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31512 jc bad_get_user
31513 GET_THREAD_INFO(%_ASM_DX)
31514 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31515 jae bad_get_user
31516 ASM_STAC
31517-2: movzwl -1(%_ASM_AX),%edx
31518+
31519+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31520+ mov pax_user_shadow_base,%_ASM_DX
31521+ cmp %_ASM_DX,%_ASM_AX
31522+ jae 1234f
31523+ add %_ASM_DX,%_ASM_AX
31524+1234:
31525+#endif
31526+
31527+#endif
31528+
31529+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
31530 xor %eax,%eax
31531 ASM_CLAC
31532+ pax_force_retaddr
31533 ret
31534 CFI_ENDPROC
31535 ENDPROC(__get_user_2)
31536@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
31537 ENTRY(__get_user_4)
31538 CFI_STARTPROC
31539 add $3,%_ASM_AX
31540+
31541+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31542 jc bad_get_user
31543 GET_THREAD_INFO(%_ASM_DX)
31544 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31545 jae bad_get_user
31546 ASM_STAC
31547-3: movl -3(%_ASM_AX),%edx
31548+
31549+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31550+ mov pax_user_shadow_base,%_ASM_DX
31551+ cmp %_ASM_DX,%_ASM_AX
31552+ jae 1234f
31553+ add %_ASM_DX,%_ASM_AX
31554+1234:
31555+#endif
31556+
31557+#endif
31558+
31559+3: __copyuser_seg movl -3(%_ASM_AX),%edx
31560 xor %eax,%eax
31561 ASM_CLAC
31562+ pax_force_retaddr
31563 ret
31564 CFI_ENDPROC
31565 ENDPROC(__get_user_4)
31566@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
31567 GET_THREAD_INFO(%_ASM_DX)
31568 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31569 jae bad_get_user
31570+
31571+#ifdef CONFIG_PAX_MEMORY_UDEREF
31572+ mov pax_user_shadow_base,%_ASM_DX
31573+ cmp %_ASM_DX,%_ASM_AX
31574+ jae 1234f
31575+ add %_ASM_DX,%_ASM_AX
31576+1234:
31577+#endif
31578+
31579 ASM_STAC
31580 4: movq -7(%_ASM_AX),%rdx
31581 xor %eax,%eax
31582 ASM_CLAC
31583+ pax_force_retaddr
31584 ret
31585 #else
31586 add $7,%_ASM_AX
31587@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
31588 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31589 jae bad_get_user_8
31590 ASM_STAC
31591-4: movl -7(%_ASM_AX),%edx
31592-5: movl -3(%_ASM_AX),%ecx
31593+4: __copyuser_seg movl -7(%_ASM_AX),%edx
31594+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
31595 xor %eax,%eax
31596 ASM_CLAC
31597+ pax_force_retaddr
31598 ret
31599 #endif
31600 CFI_ENDPROC
31601@@ -113,6 +175,7 @@ bad_get_user:
31602 xor %edx,%edx
31603 mov $(-EFAULT),%_ASM_AX
31604 ASM_CLAC
31605+ pax_force_retaddr
31606 ret
31607 CFI_ENDPROC
31608 END(bad_get_user)
31609@@ -124,6 +187,7 @@ bad_get_user_8:
31610 xor %ecx,%ecx
31611 mov $(-EFAULT),%_ASM_AX
31612 ASM_CLAC
31613+ pax_force_retaddr
31614 ret
31615 CFI_ENDPROC
31616 END(bad_get_user_8)
31617diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
31618index 54fcffe..7be149e 100644
31619--- a/arch/x86/lib/insn.c
31620+++ b/arch/x86/lib/insn.c
31621@@ -20,8 +20,10 @@
31622
31623 #ifdef __KERNEL__
31624 #include <linux/string.h>
31625+#include <asm/pgtable_types.h>
31626 #else
31627 #include <string.h>
31628+#define ktla_ktva(addr) addr
31629 #endif
31630 #include <asm/inat.h>
31631 #include <asm/insn.h>
31632@@ -53,8 +55,8 @@
31633 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
31634 {
31635 memset(insn, 0, sizeof(*insn));
31636- insn->kaddr = kaddr;
31637- insn->next_byte = kaddr;
31638+ insn->kaddr = ktla_ktva(kaddr);
31639+ insn->next_byte = ktla_ktva(kaddr);
31640 insn->x86_64 = x86_64 ? 1 : 0;
31641 insn->opnd_bytes = 4;
31642 if (x86_64)
31643diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
31644index 05a95e7..326f2fa 100644
31645--- a/arch/x86/lib/iomap_copy_64.S
31646+++ b/arch/x86/lib/iomap_copy_64.S
31647@@ -17,6 +17,7 @@
31648
31649 #include <linux/linkage.h>
31650 #include <asm/dwarf2.h>
31651+#include <asm/alternative-asm.h>
31652
31653 /*
31654 * override generic version in lib/iomap_copy.c
31655@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
31656 CFI_STARTPROC
31657 movl %edx,%ecx
31658 rep movsd
31659+ pax_force_retaddr
31660 ret
31661 CFI_ENDPROC
31662 ENDPROC(__iowrite32_copy)
31663diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
31664index 56313a3..0db417e 100644
31665--- a/arch/x86/lib/memcpy_64.S
31666+++ b/arch/x86/lib/memcpy_64.S
31667@@ -24,7 +24,7 @@
31668 * This gets patched over the unrolled variant (below) via the
31669 * alternative instructions framework:
31670 */
31671- .section .altinstr_replacement, "ax", @progbits
31672+ .section .altinstr_replacement, "a", @progbits
31673 .Lmemcpy_c:
31674 movq %rdi, %rax
31675 movq %rdx, %rcx
31676@@ -33,6 +33,7 @@
31677 rep movsq
31678 movl %edx, %ecx
31679 rep movsb
31680+ pax_force_retaddr
31681 ret
31682 .Lmemcpy_e:
31683 .previous
31684@@ -44,11 +45,12 @@
31685 * This gets patched over the unrolled variant (below) via the
31686 * alternative instructions framework:
31687 */
31688- .section .altinstr_replacement, "ax", @progbits
31689+ .section .altinstr_replacement, "a", @progbits
31690 .Lmemcpy_c_e:
31691 movq %rdi, %rax
31692 movq %rdx, %rcx
31693 rep movsb
31694+ pax_force_retaddr
31695 ret
31696 .Lmemcpy_e_e:
31697 .previous
31698@@ -136,6 +138,7 @@ ENTRY(memcpy)
31699 movq %r9, 1*8(%rdi)
31700 movq %r10, -2*8(%rdi, %rdx)
31701 movq %r11, -1*8(%rdi, %rdx)
31702+ pax_force_retaddr
31703 retq
31704 .p2align 4
31705 .Lless_16bytes:
31706@@ -148,6 +151,7 @@ ENTRY(memcpy)
31707 movq -1*8(%rsi, %rdx), %r9
31708 movq %r8, 0*8(%rdi)
31709 movq %r9, -1*8(%rdi, %rdx)
31710+ pax_force_retaddr
31711 retq
31712 .p2align 4
31713 .Lless_8bytes:
31714@@ -161,6 +165,7 @@ ENTRY(memcpy)
31715 movl -4(%rsi, %rdx), %r8d
31716 movl %ecx, (%rdi)
31717 movl %r8d, -4(%rdi, %rdx)
31718+ pax_force_retaddr
31719 retq
31720 .p2align 4
31721 .Lless_3bytes:
31722@@ -179,6 +184,7 @@ ENTRY(memcpy)
31723 movb %cl, (%rdi)
31724
31725 .Lend:
31726+ pax_force_retaddr
31727 retq
31728 CFI_ENDPROC
31729 ENDPROC(memcpy)
31730diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
31731index 65268a6..dd1de11 100644
31732--- a/arch/x86/lib/memmove_64.S
31733+++ b/arch/x86/lib/memmove_64.S
31734@@ -202,14 +202,16 @@ ENTRY(memmove)
31735 movb (%rsi), %r11b
31736 movb %r11b, (%rdi)
31737 13:
31738+ pax_force_retaddr
31739 retq
31740 CFI_ENDPROC
31741
31742- .section .altinstr_replacement,"ax"
31743+ .section .altinstr_replacement,"a"
31744 .Lmemmove_begin_forward_efs:
31745 /* Forward moving data. */
31746 movq %rdx, %rcx
31747 rep movsb
31748+ pax_force_retaddr
31749 retq
31750 .Lmemmove_end_forward_efs:
31751 .previous
31752diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
31753index 2dcb380..2eb79fe 100644
31754--- a/arch/x86/lib/memset_64.S
31755+++ b/arch/x86/lib/memset_64.S
31756@@ -16,7 +16,7 @@
31757 *
31758 * rax original destination
31759 */
31760- .section .altinstr_replacement, "ax", @progbits
31761+ .section .altinstr_replacement, "a", @progbits
31762 .Lmemset_c:
31763 movq %rdi,%r9
31764 movq %rdx,%rcx
31765@@ -30,6 +30,7 @@
31766 movl %edx,%ecx
31767 rep stosb
31768 movq %r9,%rax
31769+ pax_force_retaddr
31770 ret
31771 .Lmemset_e:
31772 .previous
31773@@ -45,13 +46,14 @@
31774 *
31775 * rax original destination
31776 */
31777- .section .altinstr_replacement, "ax", @progbits
31778+ .section .altinstr_replacement, "a", @progbits
31779 .Lmemset_c_e:
31780 movq %rdi,%r9
31781 movb %sil,%al
31782 movq %rdx,%rcx
31783 rep stosb
31784 movq %r9,%rax
31785+ pax_force_retaddr
31786 ret
31787 .Lmemset_e_e:
31788 .previous
31789@@ -118,6 +120,7 @@ ENTRY(__memset)
31790
31791 .Lende:
31792 movq %r10,%rax
31793+ pax_force_retaddr
31794 ret
31795
31796 CFI_RESTORE_STATE
31797diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
31798index c9f2d9b..e7fd2c0 100644
31799--- a/arch/x86/lib/mmx_32.c
31800+++ b/arch/x86/lib/mmx_32.c
31801@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31802 {
31803 void *p;
31804 int i;
31805+ unsigned long cr0;
31806
31807 if (unlikely(in_interrupt()))
31808 return __memcpy(to, from, len);
31809@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31810 kernel_fpu_begin();
31811
31812 __asm__ __volatile__ (
31813- "1: prefetch (%0)\n" /* This set is 28 bytes */
31814- " prefetch 64(%0)\n"
31815- " prefetch 128(%0)\n"
31816- " prefetch 192(%0)\n"
31817- " prefetch 256(%0)\n"
31818+ "1: prefetch (%1)\n" /* This set is 28 bytes */
31819+ " prefetch 64(%1)\n"
31820+ " prefetch 128(%1)\n"
31821+ " prefetch 192(%1)\n"
31822+ " prefetch 256(%1)\n"
31823 "2: \n"
31824 ".section .fixup, \"ax\"\n"
31825- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31826+ "3: \n"
31827+
31828+#ifdef CONFIG_PAX_KERNEXEC
31829+ " movl %%cr0, %0\n"
31830+ " movl %0, %%eax\n"
31831+ " andl $0xFFFEFFFF, %%eax\n"
31832+ " movl %%eax, %%cr0\n"
31833+#endif
31834+
31835+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31836+
31837+#ifdef CONFIG_PAX_KERNEXEC
31838+ " movl %0, %%cr0\n"
31839+#endif
31840+
31841 " jmp 2b\n"
31842 ".previous\n"
31843 _ASM_EXTABLE(1b, 3b)
31844- : : "r" (from));
31845+ : "=&r" (cr0) : "r" (from) : "ax");
31846
31847 for ( ; i > 5; i--) {
31848 __asm__ __volatile__ (
31849- "1: prefetch 320(%0)\n"
31850- "2: movq (%0), %%mm0\n"
31851- " movq 8(%0), %%mm1\n"
31852- " movq 16(%0), %%mm2\n"
31853- " movq 24(%0), %%mm3\n"
31854- " movq %%mm0, (%1)\n"
31855- " movq %%mm1, 8(%1)\n"
31856- " movq %%mm2, 16(%1)\n"
31857- " movq %%mm3, 24(%1)\n"
31858- " movq 32(%0), %%mm0\n"
31859- " movq 40(%0), %%mm1\n"
31860- " movq 48(%0), %%mm2\n"
31861- " movq 56(%0), %%mm3\n"
31862- " movq %%mm0, 32(%1)\n"
31863- " movq %%mm1, 40(%1)\n"
31864- " movq %%mm2, 48(%1)\n"
31865- " movq %%mm3, 56(%1)\n"
31866+ "1: prefetch 320(%1)\n"
31867+ "2: movq (%1), %%mm0\n"
31868+ " movq 8(%1), %%mm1\n"
31869+ " movq 16(%1), %%mm2\n"
31870+ " movq 24(%1), %%mm3\n"
31871+ " movq %%mm0, (%2)\n"
31872+ " movq %%mm1, 8(%2)\n"
31873+ " movq %%mm2, 16(%2)\n"
31874+ " movq %%mm3, 24(%2)\n"
31875+ " movq 32(%1), %%mm0\n"
31876+ " movq 40(%1), %%mm1\n"
31877+ " movq 48(%1), %%mm2\n"
31878+ " movq 56(%1), %%mm3\n"
31879+ " movq %%mm0, 32(%2)\n"
31880+ " movq %%mm1, 40(%2)\n"
31881+ " movq %%mm2, 48(%2)\n"
31882+ " movq %%mm3, 56(%2)\n"
31883 ".section .fixup, \"ax\"\n"
31884- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31885+ "3:\n"
31886+
31887+#ifdef CONFIG_PAX_KERNEXEC
31888+ " movl %%cr0, %0\n"
31889+ " movl %0, %%eax\n"
31890+ " andl $0xFFFEFFFF, %%eax\n"
31891+ " movl %%eax, %%cr0\n"
31892+#endif
31893+
31894+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31895+
31896+#ifdef CONFIG_PAX_KERNEXEC
31897+ " movl %0, %%cr0\n"
31898+#endif
31899+
31900 " jmp 2b\n"
31901 ".previous\n"
31902 _ASM_EXTABLE(1b, 3b)
31903- : : "r" (from), "r" (to) : "memory");
31904+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31905
31906 from += 64;
31907 to += 64;
31908@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
31909 static void fast_copy_page(void *to, void *from)
31910 {
31911 int i;
31912+ unsigned long cr0;
31913
31914 kernel_fpu_begin();
31915
31916@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
31917 * but that is for later. -AV
31918 */
31919 __asm__ __volatile__(
31920- "1: prefetch (%0)\n"
31921- " prefetch 64(%0)\n"
31922- " prefetch 128(%0)\n"
31923- " prefetch 192(%0)\n"
31924- " prefetch 256(%0)\n"
31925+ "1: prefetch (%1)\n"
31926+ " prefetch 64(%1)\n"
31927+ " prefetch 128(%1)\n"
31928+ " prefetch 192(%1)\n"
31929+ " prefetch 256(%1)\n"
31930 "2: \n"
31931 ".section .fixup, \"ax\"\n"
31932- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31933+ "3: \n"
31934+
31935+#ifdef CONFIG_PAX_KERNEXEC
31936+ " movl %%cr0, %0\n"
31937+ " movl %0, %%eax\n"
31938+ " andl $0xFFFEFFFF, %%eax\n"
31939+ " movl %%eax, %%cr0\n"
31940+#endif
31941+
31942+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31943+
31944+#ifdef CONFIG_PAX_KERNEXEC
31945+ " movl %0, %%cr0\n"
31946+#endif
31947+
31948 " jmp 2b\n"
31949 ".previous\n"
31950- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31951+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31952
31953 for (i = 0; i < (4096-320)/64; i++) {
31954 __asm__ __volatile__ (
31955- "1: prefetch 320(%0)\n"
31956- "2: movq (%0), %%mm0\n"
31957- " movntq %%mm0, (%1)\n"
31958- " movq 8(%0), %%mm1\n"
31959- " movntq %%mm1, 8(%1)\n"
31960- " movq 16(%0), %%mm2\n"
31961- " movntq %%mm2, 16(%1)\n"
31962- " movq 24(%0), %%mm3\n"
31963- " movntq %%mm3, 24(%1)\n"
31964- " movq 32(%0), %%mm4\n"
31965- " movntq %%mm4, 32(%1)\n"
31966- " movq 40(%0), %%mm5\n"
31967- " movntq %%mm5, 40(%1)\n"
31968- " movq 48(%0), %%mm6\n"
31969- " movntq %%mm6, 48(%1)\n"
31970- " movq 56(%0), %%mm7\n"
31971- " movntq %%mm7, 56(%1)\n"
31972+ "1: prefetch 320(%1)\n"
31973+ "2: movq (%1), %%mm0\n"
31974+ " movntq %%mm0, (%2)\n"
31975+ " movq 8(%1), %%mm1\n"
31976+ " movntq %%mm1, 8(%2)\n"
31977+ " movq 16(%1), %%mm2\n"
31978+ " movntq %%mm2, 16(%2)\n"
31979+ " movq 24(%1), %%mm3\n"
31980+ " movntq %%mm3, 24(%2)\n"
31981+ " movq 32(%1), %%mm4\n"
31982+ " movntq %%mm4, 32(%2)\n"
31983+ " movq 40(%1), %%mm5\n"
31984+ " movntq %%mm5, 40(%2)\n"
31985+ " movq 48(%1), %%mm6\n"
31986+ " movntq %%mm6, 48(%2)\n"
31987+ " movq 56(%1), %%mm7\n"
31988+ " movntq %%mm7, 56(%2)\n"
31989 ".section .fixup, \"ax\"\n"
31990- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31991+ "3:\n"
31992+
31993+#ifdef CONFIG_PAX_KERNEXEC
31994+ " movl %%cr0, %0\n"
31995+ " movl %0, %%eax\n"
31996+ " andl $0xFFFEFFFF, %%eax\n"
31997+ " movl %%eax, %%cr0\n"
31998+#endif
31999+
32000+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32001+
32002+#ifdef CONFIG_PAX_KERNEXEC
32003+ " movl %0, %%cr0\n"
32004+#endif
32005+
32006 " jmp 2b\n"
32007 ".previous\n"
32008- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
32009+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
32010
32011 from += 64;
32012 to += 64;
32013@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
32014 static void fast_copy_page(void *to, void *from)
32015 {
32016 int i;
32017+ unsigned long cr0;
32018
32019 kernel_fpu_begin();
32020
32021 __asm__ __volatile__ (
32022- "1: prefetch (%0)\n"
32023- " prefetch 64(%0)\n"
32024- " prefetch 128(%0)\n"
32025- " prefetch 192(%0)\n"
32026- " prefetch 256(%0)\n"
32027+ "1: prefetch (%1)\n"
32028+ " prefetch 64(%1)\n"
32029+ " prefetch 128(%1)\n"
32030+ " prefetch 192(%1)\n"
32031+ " prefetch 256(%1)\n"
32032 "2: \n"
32033 ".section .fixup, \"ax\"\n"
32034- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
32035+ "3: \n"
32036+
32037+#ifdef CONFIG_PAX_KERNEXEC
32038+ " movl %%cr0, %0\n"
32039+ " movl %0, %%eax\n"
32040+ " andl $0xFFFEFFFF, %%eax\n"
32041+ " movl %%eax, %%cr0\n"
32042+#endif
32043+
32044+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
32045+
32046+#ifdef CONFIG_PAX_KERNEXEC
32047+ " movl %0, %%cr0\n"
32048+#endif
32049+
32050 " jmp 2b\n"
32051 ".previous\n"
32052- _ASM_EXTABLE(1b, 3b) : : "r" (from));
32053+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
32054
32055 for (i = 0; i < 4096/64; i++) {
32056 __asm__ __volatile__ (
32057- "1: prefetch 320(%0)\n"
32058- "2: movq (%0), %%mm0\n"
32059- " movq 8(%0), %%mm1\n"
32060- " movq 16(%0), %%mm2\n"
32061- " movq 24(%0), %%mm3\n"
32062- " movq %%mm0, (%1)\n"
32063- " movq %%mm1, 8(%1)\n"
32064- " movq %%mm2, 16(%1)\n"
32065- " movq %%mm3, 24(%1)\n"
32066- " movq 32(%0), %%mm0\n"
32067- " movq 40(%0), %%mm1\n"
32068- " movq 48(%0), %%mm2\n"
32069- " movq 56(%0), %%mm3\n"
32070- " movq %%mm0, 32(%1)\n"
32071- " movq %%mm1, 40(%1)\n"
32072- " movq %%mm2, 48(%1)\n"
32073- " movq %%mm3, 56(%1)\n"
32074+ "1: prefetch 320(%1)\n"
32075+ "2: movq (%1), %%mm0\n"
32076+ " movq 8(%1), %%mm1\n"
32077+ " movq 16(%1), %%mm2\n"
32078+ " movq 24(%1), %%mm3\n"
32079+ " movq %%mm0, (%2)\n"
32080+ " movq %%mm1, 8(%2)\n"
32081+ " movq %%mm2, 16(%2)\n"
32082+ " movq %%mm3, 24(%2)\n"
32083+ " movq 32(%1), %%mm0\n"
32084+ " movq 40(%1), %%mm1\n"
32085+ " movq 48(%1), %%mm2\n"
32086+ " movq 56(%1), %%mm3\n"
32087+ " movq %%mm0, 32(%2)\n"
32088+ " movq %%mm1, 40(%2)\n"
32089+ " movq %%mm2, 48(%2)\n"
32090+ " movq %%mm3, 56(%2)\n"
32091 ".section .fixup, \"ax\"\n"
32092- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32093+ "3:\n"
32094+
32095+#ifdef CONFIG_PAX_KERNEXEC
32096+ " movl %%cr0, %0\n"
32097+ " movl %0, %%eax\n"
32098+ " andl $0xFFFEFFFF, %%eax\n"
32099+ " movl %%eax, %%cr0\n"
32100+#endif
32101+
32102+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32103+
32104+#ifdef CONFIG_PAX_KERNEXEC
32105+ " movl %0, %%cr0\n"
32106+#endif
32107+
32108 " jmp 2b\n"
32109 ".previous\n"
32110 _ASM_EXTABLE(1b, 3b)
32111- : : "r" (from), "r" (to) : "memory");
32112+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
32113
32114 from += 64;
32115 to += 64;
32116diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
32117index f6d13ee..d789440 100644
32118--- a/arch/x86/lib/msr-reg.S
32119+++ b/arch/x86/lib/msr-reg.S
32120@@ -3,6 +3,7 @@
32121 #include <asm/dwarf2.h>
32122 #include <asm/asm.h>
32123 #include <asm/msr.h>
32124+#include <asm/alternative-asm.h>
32125
32126 #ifdef CONFIG_X86_64
32127 /*
32128@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
32129 movl %edi, 28(%r10)
32130 popq_cfi %rbp
32131 popq_cfi %rbx
32132+ pax_force_retaddr
32133 ret
32134 3:
32135 CFI_RESTORE_STATE
32136diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
32137index fc6ba17..d4d989d 100644
32138--- a/arch/x86/lib/putuser.S
32139+++ b/arch/x86/lib/putuser.S
32140@@ -16,7 +16,9 @@
32141 #include <asm/errno.h>
32142 #include <asm/asm.h>
32143 #include <asm/smap.h>
32144-
32145+#include <asm/segment.h>
32146+#include <asm/pgtable.h>
32147+#include <asm/alternative-asm.h>
32148
32149 /*
32150 * __put_user_X
32151@@ -30,57 +32,125 @@
32152 * as they get called from within inline assembly.
32153 */
32154
32155-#define ENTER CFI_STARTPROC ; \
32156- GET_THREAD_INFO(%_ASM_BX)
32157-#define EXIT ASM_CLAC ; \
32158- ret ; \
32159+#define ENTER CFI_STARTPROC
32160+#define EXIT ASM_CLAC ; \
32161+ pax_force_retaddr ; \
32162+ ret ; \
32163 CFI_ENDPROC
32164
32165+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32166+#define _DEST %_ASM_CX,%_ASM_BX
32167+#else
32168+#define _DEST %_ASM_CX
32169+#endif
32170+
32171+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
32172+#define __copyuser_seg gs;
32173+#else
32174+#define __copyuser_seg
32175+#endif
32176+
32177 .text
32178 ENTRY(__put_user_1)
32179 ENTER
32180+
32181+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32182+ GET_THREAD_INFO(%_ASM_BX)
32183 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
32184 jae bad_put_user
32185 ASM_STAC
32186-1: movb %al,(%_ASM_CX)
32187+
32188+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32189+ mov pax_user_shadow_base,%_ASM_BX
32190+ cmp %_ASM_BX,%_ASM_CX
32191+ jb 1234f
32192+ xor %ebx,%ebx
32193+1234:
32194+#endif
32195+
32196+#endif
32197+
32198+1: __copyuser_seg movb %al,(_DEST)
32199 xor %eax,%eax
32200 EXIT
32201 ENDPROC(__put_user_1)
32202
32203 ENTRY(__put_user_2)
32204 ENTER
32205+
32206+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32207+ GET_THREAD_INFO(%_ASM_BX)
32208 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32209 sub $1,%_ASM_BX
32210 cmp %_ASM_BX,%_ASM_CX
32211 jae bad_put_user
32212 ASM_STAC
32213-2: movw %ax,(%_ASM_CX)
32214+
32215+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32216+ mov pax_user_shadow_base,%_ASM_BX
32217+ cmp %_ASM_BX,%_ASM_CX
32218+ jb 1234f
32219+ xor %ebx,%ebx
32220+1234:
32221+#endif
32222+
32223+#endif
32224+
32225+2: __copyuser_seg movw %ax,(_DEST)
32226 xor %eax,%eax
32227 EXIT
32228 ENDPROC(__put_user_2)
32229
32230 ENTRY(__put_user_4)
32231 ENTER
32232+
32233+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32234+ GET_THREAD_INFO(%_ASM_BX)
32235 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32236 sub $3,%_ASM_BX
32237 cmp %_ASM_BX,%_ASM_CX
32238 jae bad_put_user
32239 ASM_STAC
32240-3: movl %eax,(%_ASM_CX)
32241+
32242+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32243+ mov pax_user_shadow_base,%_ASM_BX
32244+ cmp %_ASM_BX,%_ASM_CX
32245+ jb 1234f
32246+ xor %ebx,%ebx
32247+1234:
32248+#endif
32249+
32250+#endif
32251+
32252+3: __copyuser_seg movl %eax,(_DEST)
32253 xor %eax,%eax
32254 EXIT
32255 ENDPROC(__put_user_4)
32256
32257 ENTRY(__put_user_8)
32258 ENTER
32259+
32260+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32261+ GET_THREAD_INFO(%_ASM_BX)
32262 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32263 sub $7,%_ASM_BX
32264 cmp %_ASM_BX,%_ASM_CX
32265 jae bad_put_user
32266 ASM_STAC
32267-4: mov %_ASM_AX,(%_ASM_CX)
32268+
32269+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32270+ mov pax_user_shadow_base,%_ASM_BX
32271+ cmp %_ASM_BX,%_ASM_CX
32272+ jb 1234f
32273+ xor %ebx,%ebx
32274+1234:
32275+#endif
32276+
32277+#endif
32278+
32279+4: __copyuser_seg mov %_ASM_AX,(_DEST)
32280 #ifdef CONFIG_X86_32
32281-5: movl %edx,4(%_ASM_CX)
32282+5: __copyuser_seg movl %edx,4(_DEST)
32283 #endif
32284 xor %eax,%eax
32285 EXIT
32286diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
32287index 1cad221..de671ee 100644
32288--- a/arch/x86/lib/rwlock.S
32289+++ b/arch/x86/lib/rwlock.S
32290@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
32291 FRAME
32292 0: LOCK_PREFIX
32293 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
32294+
32295+#ifdef CONFIG_PAX_REFCOUNT
32296+ jno 1234f
32297+ LOCK_PREFIX
32298+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
32299+ int $4
32300+1234:
32301+ _ASM_EXTABLE(1234b, 1234b)
32302+#endif
32303+
32304 1: rep; nop
32305 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
32306 jne 1b
32307 LOCK_PREFIX
32308 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
32309+
32310+#ifdef CONFIG_PAX_REFCOUNT
32311+ jno 1234f
32312+ LOCK_PREFIX
32313+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
32314+ int $4
32315+1234:
32316+ _ASM_EXTABLE(1234b, 1234b)
32317+#endif
32318+
32319 jnz 0b
32320 ENDFRAME
32321+ pax_force_retaddr
32322 ret
32323 CFI_ENDPROC
32324 END(__write_lock_failed)
32325@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
32326 FRAME
32327 0: LOCK_PREFIX
32328 READ_LOCK_SIZE(inc) (%__lock_ptr)
32329+
32330+#ifdef CONFIG_PAX_REFCOUNT
32331+ jno 1234f
32332+ LOCK_PREFIX
32333+ READ_LOCK_SIZE(dec) (%__lock_ptr)
32334+ int $4
32335+1234:
32336+ _ASM_EXTABLE(1234b, 1234b)
32337+#endif
32338+
32339 1: rep; nop
32340 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
32341 js 1b
32342 LOCK_PREFIX
32343 READ_LOCK_SIZE(dec) (%__lock_ptr)
32344+
32345+#ifdef CONFIG_PAX_REFCOUNT
32346+ jno 1234f
32347+ LOCK_PREFIX
32348+ READ_LOCK_SIZE(inc) (%__lock_ptr)
32349+ int $4
32350+1234:
32351+ _ASM_EXTABLE(1234b, 1234b)
32352+#endif
32353+
32354 js 0b
32355 ENDFRAME
32356+ pax_force_retaddr
32357 ret
32358 CFI_ENDPROC
32359 END(__read_lock_failed)
32360diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
32361index 5dff5f0..cadebf4 100644
32362--- a/arch/x86/lib/rwsem.S
32363+++ b/arch/x86/lib/rwsem.S
32364@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
32365 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
32366 CFI_RESTORE __ASM_REG(dx)
32367 restore_common_regs
32368+ pax_force_retaddr
32369 ret
32370 CFI_ENDPROC
32371 ENDPROC(call_rwsem_down_read_failed)
32372@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
32373 movq %rax,%rdi
32374 call rwsem_down_write_failed
32375 restore_common_regs
32376+ pax_force_retaddr
32377 ret
32378 CFI_ENDPROC
32379 ENDPROC(call_rwsem_down_write_failed)
32380@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
32381 movq %rax,%rdi
32382 call rwsem_wake
32383 restore_common_regs
32384-1: ret
32385+1: pax_force_retaddr
32386+ ret
32387 CFI_ENDPROC
32388 ENDPROC(call_rwsem_wake)
32389
32390@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
32391 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
32392 CFI_RESTORE __ASM_REG(dx)
32393 restore_common_regs
32394+ pax_force_retaddr
32395 ret
32396 CFI_ENDPROC
32397 ENDPROC(call_rwsem_downgrade_wake)
32398diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
32399index 92d9fea..b2762c8 100644
32400--- a/arch/x86/lib/thunk_64.S
32401+++ b/arch/x86/lib/thunk_64.S
32402@@ -9,6 +9,7 @@
32403 #include <asm/dwarf2.h>
32404 #include <asm/calling.h>
32405 #include <asm/asm.h>
32406+#include <asm/alternative-asm.h>
32407
32408 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
32409 .macro THUNK name, func, put_ret_addr_in_rdi=0
32410@@ -16,11 +17,11 @@
32411 \name:
32412 CFI_STARTPROC
32413
32414- /* this one pushes 9 elems, the next one would be %rIP */
32415- SAVE_ARGS
32416+ /* this one pushes 15+1 elems, the next one would be %rIP */
32417+ SAVE_ARGS 8
32418
32419 .if \put_ret_addr_in_rdi
32420- movq_cfi_restore 9*8, rdi
32421+ movq_cfi_restore RIP, rdi
32422 .endif
32423
32424 call \func
32425@@ -40,9 +41,10 @@
32426
32427 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
32428 CFI_STARTPROC
32429- SAVE_ARGS
32430+ SAVE_ARGS 8
32431 restore:
32432- RESTORE_ARGS
32433+ RESTORE_ARGS 1,8
32434+ pax_force_retaddr
32435 ret
32436 CFI_ENDPROC
32437 _ASM_NOKPROBE(restore)
32438diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
32439index e2f5e21..4b22130 100644
32440--- a/arch/x86/lib/usercopy_32.c
32441+++ b/arch/x86/lib/usercopy_32.c
32442@@ -42,11 +42,13 @@ do { \
32443 int __d0; \
32444 might_fault(); \
32445 __asm__ __volatile__( \
32446+ __COPYUSER_SET_ES \
32447 ASM_STAC "\n" \
32448 "0: rep; stosl\n" \
32449 " movl %2,%0\n" \
32450 "1: rep; stosb\n" \
32451 "2: " ASM_CLAC "\n" \
32452+ __COPYUSER_RESTORE_ES \
32453 ".section .fixup,\"ax\"\n" \
32454 "3: lea 0(%2,%0,4),%0\n" \
32455 " jmp 2b\n" \
32456@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
32457
32458 #ifdef CONFIG_X86_INTEL_USERCOPY
32459 static unsigned long
32460-__copy_user_intel(void __user *to, const void *from, unsigned long size)
32461+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
32462 {
32463 int d0, d1;
32464 __asm__ __volatile__(
32465@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
32466 " .align 2,0x90\n"
32467 "3: movl 0(%4), %%eax\n"
32468 "4: movl 4(%4), %%edx\n"
32469- "5: movl %%eax, 0(%3)\n"
32470- "6: movl %%edx, 4(%3)\n"
32471+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
32472+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
32473 "7: movl 8(%4), %%eax\n"
32474 "8: movl 12(%4),%%edx\n"
32475- "9: movl %%eax, 8(%3)\n"
32476- "10: movl %%edx, 12(%3)\n"
32477+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
32478+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
32479 "11: movl 16(%4), %%eax\n"
32480 "12: movl 20(%4), %%edx\n"
32481- "13: movl %%eax, 16(%3)\n"
32482- "14: movl %%edx, 20(%3)\n"
32483+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
32484+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
32485 "15: movl 24(%4), %%eax\n"
32486 "16: movl 28(%4), %%edx\n"
32487- "17: movl %%eax, 24(%3)\n"
32488- "18: movl %%edx, 28(%3)\n"
32489+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
32490+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
32491 "19: movl 32(%4), %%eax\n"
32492 "20: movl 36(%4), %%edx\n"
32493- "21: movl %%eax, 32(%3)\n"
32494- "22: movl %%edx, 36(%3)\n"
32495+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
32496+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
32497 "23: movl 40(%4), %%eax\n"
32498 "24: movl 44(%4), %%edx\n"
32499- "25: movl %%eax, 40(%3)\n"
32500- "26: movl %%edx, 44(%3)\n"
32501+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
32502+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
32503 "27: movl 48(%4), %%eax\n"
32504 "28: movl 52(%4), %%edx\n"
32505- "29: movl %%eax, 48(%3)\n"
32506- "30: movl %%edx, 52(%3)\n"
32507+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
32508+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
32509 "31: movl 56(%4), %%eax\n"
32510 "32: movl 60(%4), %%edx\n"
32511- "33: movl %%eax, 56(%3)\n"
32512- "34: movl %%edx, 60(%3)\n"
32513+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
32514+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
32515 " addl $-64, %0\n"
32516 " addl $64, %4\n"
32517 " addl $64, %3\n"
32518@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
32519 " shrl $2, %0\n"
32520 " andl $3, %%eax\n"
32521 " cld\n"
32522+ __COPYUSER_SET_ES
32523 "99: rep; movsl\n"
32524 "36: movl %%eax, %0\n"
32525 "37: rep; movsb\n"
32526 "100:\n"
32527+ __COPYUSER_RESTORE_ES
32528+ ".section .fixup,\"ax\"\n"
32529+ "101: lea 0(%%eax,%0,4),%0\n"
32530+ " jmp 100b\n"
32531+ ".previous\n"
32532+ _ASM_EXTABLE(1b,100b)
32533+ _ASM_EXTABLE(2b,100b)
32534+ _ASM_EXTABLE(3b,100b)
32535+ _ASM_EXTABLE(4b,100b)
32536+ _ASM_EXTABLE(5b,100b)
32537+ _ASM_EXTABLE(6b,100b)
32538+ _ASM_EXTABLE(7b,100b)
32539+ _ASM_EXTABLE(8b,100b)
32540+ _ASM_EXTABLE(9b,100b)
32541+ _ASM_EXTABLE(10b,100b)
32542+ _ASM_EXTABLE(11b,100b)
32543+ _ASM_EXTABLE(12b,100b)
32544+ _ASM_EXTABLE(13b,100b)
32545+ _ASM_EXTABLE(14b,100b)
32546+ _ASM_EXTABLE(15b,100b)
32547+ _ASM_EXTABLE(16b,100b)
32548+ _ASM_EXTABLE(17b,100b)
32549+ _ASM_EXTABLE(18b,100b)
32550+ _ASM_EXTABLE(19b,100b)
32551+ _ASM_EXTABLE(20b,100b)
32552+ _ASM_EXTABLE(21b,100b)
32553+ _ASM_EXTABLE(22b,100b)
32554+ _ASM_EXTABLE(23b,100b)
32555+ _ASM_EXTABLE(24b,100b)
32556+ _ASM_EXTABLE(25b,100b)
32557+ _ASM_EXTABLE(26b,100b)
32558+ _ASM_EXTABLE(27b,100b)
32559+ _ASM_EXTABLE(28b,100b)
32560+ _ASM_EXTABLE(29b,100b)
32561+ _ASM_EXTABLE(30b,100b)
32562+ _ASM_EXTABLE(31b,100b)
32563+ _ASM_EXTABLE(32b,100b)
32564+ _ASM_EXTABLE(33b,100b)
32565+ _ASM_EXTABLE(34b,100b)
32566+ _ASM_EXTABLE(35b,100b)
32567+ _ASM_EXTABLE(36b,100b)
32568+ _ASM_EXTABLE(37b,100b)
32569+ _ASM_EXTABLE(99b,101b)
32570+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
32571+ : "1"(to), "2"(from), "0"(size)
32572+ : "eax", "edx", "memory");
32573+ return size;
32574+}
32575+
32576+static unsigned long
32577+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
32578+{
32579+ int d0, d1;
32580+ __asm__ __volatile__(
32581+ " .align 2,0x90\n"
32582+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
32583+ " cmpl $67, %0\n"
32584+ " jbe 3f\n"
32585+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
32586+ " .align 2,0x90\n"
32587+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
32588+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
32589+ "5: movl %%eax, 0(%3)\n"
32590+ "6: movl %%edx, 4(%3)\n"
32591+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
32592+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
32593+ "9: movl %%eax, 8(%3)\n"
32594+ "10: movl %%edx, 12(%3)\n"
32595+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
32596+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
32597+ "13: movl %%eax, 16(%3)\n"
32598+ "14: movl %%edx, 20(%3)\n"
32599+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
32600+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
32601+ "17: movl %%eax, 24(%3)\n"
32602+ "18: movl %%edx, 28(%3)\n"
32603+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
32604+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
32605+ "21: movl %%eax, 32(%3)\n"
32606+ "22: movl %%edx, 36(%3)\n"
32607+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
32608+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
32609+ "25: movl %%eax, 40(%3)\n"
32610+ "26: movl %%edx, 44(%3)\n"
32611+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
32612+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
32613+ "29: movl %%eax, 48(%3)\n"
32614+ "30: movl %%edx, 52(%3)\n"
32615+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
32616+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
32617+ "33: movl %%eax, 56(%3)\n"
32618+ "34: movl %%edx, 60(%3)\n"
32619+ " addl $-64, %0\n"
32620+ " addl $64, %4\n"
32621+ " addl $64, %3\n"
32622+ " cmpl $63, %0\n"
32623+ " ja 1b\n"
32624+ "35: movl %0, %%eax\n"
32625+ " shrl $2, %0\n"
32626+ " andl $3, %%eax\n"
32627+ " cld\n"
32628+ "99: rep; "__copyuser_seg" movsl\n"
32629+ "36: movl %%eax, %0\n"
32630+ "37: rep; "__copyuser_seg" movsb\n"
32631+ "100:\n"
32632 ".section .fixup,\"ax\"\n"
32633 "101: lea 0(%%eax,%0,4),%0\n"
32634 " jmp 100b\n"
32635@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
32636 int d0, d1;
32637 __asm__ __volatile__(
32638 " .align 2,0x90\n"
32639- "0: movl 32(%4), %%eax\n"
32640+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32641 " cmpl $67, %0\n"
32642 " jbe 2f\n"
32643- "1: movl 64(%4), %%eax\n"
32644+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32645 " .align 2,0x90\n"
32646- "2: movl 0(%4), %%eax\n"
32647- "21: movl 4(%4), %%edx\n"
32648+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32649+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32650 " movl %%eax, 0(%3)\n"
32651 " movl %%edx, 4(%3)\n"
32652- "3: movl 8(%4), %%eax\n"
32653- "31: movl 12(%4),%%edx\n"
32654+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32655+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32656 " movl %%eax, 8(%3)\n"
32657 " movl %%edx, 12(%3)\n"
32658- "4: movl 16(%4), %%eax\n"
32659- "41: movl 20(%4), %%edx\n"
32660+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32661+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32662 " movl %%eax, 16(%3)\n"
32663 " movl %%edx, 20(%3)\n"
32664- "10: movl 24(%4), %%eax\n"
32665- "51: movl 28(%4), %%edx\n"
32666+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32667+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32668 " movl %%eax, 24(%3)\n"
32669 " movl %%edx, 28(%3)\n"
32670- "11: movl 32(%4), %%eax\n"
32671- "61: movl 36(%4), %%edx\n"
32672+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32673+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32674 " movl %%eax, 32(%3)\n"
32675 " movl %%edx, 36(%3)\n"
32676- "12: movl 40(%4), %%eax\n"
32677- "71: movl 44(%4), %%edx\n"
32678+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32679+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32680 " movl %%eax, 40(%3)\n"
32681 " movl %%edx, 44(%3)\n"
32682- "13: movl 48(%4), %%eax\n"
32683- "81: movl 52(%4), %%edx\n"
32684+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32685+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32686 " movl %%eax, 48(%3)\n"
32687 " movl %%edx, 52(%3)\n"
32688- "14: movl 56(%4), %%eax\n"
32689- "91: movl 60(%4), %%edx\n"
32690+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32691+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32692 " movl %%eax, 56(%3)\n"
32693 " movl %%edx, 60(%3)\n"
32694 " addl $-64, %0\n"
32695@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
32696 " shrl $2, %0\n"
32697 " andl $3, %%eax\n"
32698 " cld\n"
32699- "6: rep; movsl\n"
32700+ "6: rep; "__copyuser_seg" movsl\n"
32701 " movl %%eax,%0\n"
32702- "7: rep; movsb\n"
32703+ "7: rep; "__copyuser_seg" movsb\n"
32704 "8:\n"
32705 ".section .fixup,\"ax\"\n"
32706 "9: lea 0(%%eax,%0,4),%0\n"
32707@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32708
32709 __asm__ __volatile__(
32710 " .align 2,0x90\n"
32711- "0: movl 32(%4), %%eax\n"
32712+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32713 " cmpl $67, %0\n"
32714 " jbe 2f\n"
32715- "1: movl 64(%4), %%eax\n"
32716+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32717 " .align 2,0x90\n"
32718- "2: movl 0(%4), %%eax\n"
32719- "21: movl 4(%4), %%edx\n"
32720+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32721+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32722 " movnti %%eax, 0(%3)\n"
32723 " movnti %%edx, 4(%3)\n"
32724- "3: movl 8(%4), %%eax\n"
32725- "31: movl 12(%4),%%edx\n"
32726+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32727+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32728 " movnti %%eax, 8(%3)\n"
32729 " movnti %%edx, 12(%3)\n"
32730- "4: movl 16(%4), %%eax\n"
32731- "41: movl 20(%4), %%edx\n"
32732+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32733+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32734 " movnti %%eax, 16(%3)\n"
32735 " movnti %%edx, 20(%3)\n"
32736- "10: movl 24(%4), %%eax\n"
32737- "51: movl 28(%4), %%edx\n"
32738+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32739+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32740 " movnti %%eax, 24(%3)\n"
32741 " movnti %%edx, 28(%3)\n"
32742- "11: movl 32(%4), %%eax\n"
32743- "61: movl 36(%4), %%edx\n"
32744+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32745+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32746 " movnti %%eax, 32(%3)\n"
32747 " movnti %%edx, 36(%3)\n"
32748- "12: movl 40(%4), %%eax\n"
32749- "71: movl 44(%4), %%edx\n"
32750+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32751+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32752 " movnti %%eax, 40(%3)\n"
32753 " movnti %%edx, 44(%3)\n"
32754- "13: movl 48(%4), %%eax\n"
32755- "81: movl 52(%4), %%edx\n"
32756+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32757+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32758 " movnti %%eax, 48(%3)\n"
32759 " movnti %%edx, 52(%3)\n"
32760- "14: movl 56(%4), %%eax\n"
32761- "91: movl 60(%4), %%edx\n"
32762+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32763+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32764 " movnti %%eax, 56(%3)\n"
32765 " movnti %%edx, 60(%3)\n"
32766 " addl $-64, %0\n"
32767@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32768 " shrl $2, %0\n"
32769 " andl $3, %%eax\n"
32770 " cld\n"
32771- "6: rep; movsl\n"
32772+ "6: rep; "__copyuser_seg" movsl\n"
32773 " movl %%eax,%0\n"
32774- "7: rep; movsb\n"
32775+ "7: rep; "__copyuser_seg" movsb\n"
32776 "8:\n"
32777 ".section .fixup,\"ax\"\n"
32778 "9: lea 0(%%eax,%0,4),%0\n"
32779@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
32780
32781 __asm__ __volatile__(
32782 " .align 2,0x90\n"
32783- "0: movl 32(%4), %%eax\n"
32784+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32785 " cmpl $67, %0\n"
32786 " jbe 2f\n"
32787- "1: movl 64(%4), %%eax\n"
32788+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32789 " .align 2,0x90\n"
32790- "2: movl 0(%4), %%eax\n"
32791- "21: movl 4(%4), %%edx\n"
32792+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32793+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32794 " movnti %%eax, 0(%3)\n"
32795 " movnti %%edx, 4(%3)\n"
32796- "3: movl 8(%4), %%eax\n"
32797- "31: movl 12(%4),%%edx\n"
32798+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32799+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32800 " movnti %%eax, 8(%3)\n"
32801 " movnti %%edx, 12(%3)\n"
32802- "4: movl 16(%4), %%eax\n"
32803- "41: movl 20(%4), %%edx\n"
32804+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32805+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32806 " movnti %%eax, 16(%3)\n"
32807 " movnti %%edx, 20(%3)\n"
32808- "10: movl 24(%4), %%eax\n"
32809- "51: movl 28(%4), %%edx\n"
32810+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32811+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32812 " movnti %%eax, 24(%3)\n"
32813 " movnti %%edx, 28(%3)\n"
32814- "11: movl 32(%4), %%eax\n"
32815- "61: movl 36(%4), %%edx\n"
32816+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32817+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32818 " movnti %%eax, 32(%3)\n"
32819 " movnti %%edx, 36(%3)\n"
32820- "12: movl 40(%4), %%eax\n"
32821- "71: movl 44(%4), %%edx\n"
32822+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32823+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32824 " movnti %%eax, 40(%3)\n"
32825 " movnti %%edx, 44(%3)\n"
32826- "13: movl 48(%4), %%eax\n"
32827- "81: movl 52(%4), %%edx\n"
32828+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32829+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32830 " movnti %%eax, 48(%3)\n"
32831 " movnti %%edx, 52(%3)\n"
32832- "14: movl 56(%4), %%eax\n"
32833- "91: movl 60(%4), %%edx\n"
32834+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32835+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32836 " movnti %%eax, 56(%3)\n"
32837 " movnti %%edx, 60(%3)\n"
32838 " addl $-64, %0\n"
32839@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
32840 " shrl $2, %0\n"
32841 " andl $3, %%eax\n"
32842 " cld\n"
32843- "6: rep; movsl\n"
32844+ "6: rep; "__copyuser_seg" movsl\n"
32845 " movl %%eax,%0\n"
32846- "7: rep; movsb\n"
32847+ "7: rep; "__copyuser_seg" movsb\n"
32848 "8:\n"
32849 ".section .fixup,\"ax\"\n"
32850 "9: lea 0(%%eax,%0,4),%0\n"
32851@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
32852 */
32853 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
32854 unsigned long size);
32855-unsigned long __copy_user_intel(void __user *to, const void *from,
32856+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
32857+ unsigned long size);
32858+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
32859 unsigned long size);
32860 unsigned long __copy_user_zeroing_intel_nocache(void *to,
32861 const void __user *from, unsigned long size);
32862 #endif /* CONFIG_X86_INTEL_USERCOPY */
32863
32864 /* Generic arbitrary sized copy. */
32865-#define __copy_user(to, from, size) \
32866+#define __copy_user(to, from, size, prefix, set, restore) \
32867 do { \
32868 int __d0, __d1, __d2; \
32869 __asm__ __volatile__( \
32870+ set \
32871 " cmp $7,%0\n" \
32872 " jbe 1f\n" \
32873 " movl %1,%0\n" \
32874 " negl %0\n" \
32875 " andl $7,%0\n" \
32876 " subl %0,%3\n" \
32877- "4: rep; movsb\n" \
32878+ "4: rep; "prefix"movsb\n" \
32879 " movl %3,%0\n" \
32880 " shrl $2,%0\n" \
32881 " andl $3,%3\n" \
32882 " .align 2,0x90\n" \
32883- "0: rep; movsl\n" \
32884+ "0: rep; "prefix"movsl\n" \
32885 " movl %3,%0\n" \
32886- "1: rep; movsb\n" \
32887+ "1: rep; "prefix"movsb\n" \
32888 "2:\n" \
32889+ restore \
32890 ".section .fixup,\"ax\"\n" \
32891 "5: addl %3,%0\n" \
32892 " jmp 2b\n" \
32893@@ -538,14 +650,14 @@ do { \
32894 " negl %0\n" \
32895 " andl $7,%0\n" \
32896 " subl %0,%3\n" \
32897- "4: rep; movsb\n" \
32898+ "4: rep; "__copyuser_seg"movsb\n" \
32899 " movl %3,%0\n" \
32900 " shrl $2,%0\n" \
32901 " andl $3,%3\n" \
32902 " .align 2,0x90\n" \
32903- "0: rep; movsl\n" \
32904+ "0: rep; "__copyuser_seg"movsl\n" \
32905 " movl %3,%0\n" \
32906- "1: rep; movsb\n" \
32907+ "1: rep; "__copyuser_seg"movsb\n" \
32908 "2:\n" \
32909 ".section .fixup,\"ax\"\n" \
32910 "5: addl %3,%0\n" \
32911@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
32912 {
32913 stac();
32914 if (movsl_is_ok(to, from, n))
32915- __copy_user(to, from, n);
32916+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
32917 else
32918- n = __copy_user_intel(to, from, n);
32919+ n = __generic_copy_to_user_intel(to, from, n);
32920 clac();
32921 return n;
32922 }
32923@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
32924 {
32925 stac();
32926 if (movsl_is_ok(to, from, n))
32927- __copy_user(to, from, n);
32928+ __copy_user(to, from, n, __copyuser_seg, "", "");
32929 else
32930- n = __copy_user_intel((void __user *)to,
32931- (const void *)from, n);
32932+ n = __generic_copy_from_user_intel(to, from, n);
32933 clac();
32934 return n;
32935 }
32936@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
32937 if (n > 64 && cpu_has_xmm2)
32938 n = __copy_user_intel_nocache(to, from, n);
32939 else
32940- __copy_user(to, from, n);
32941+ __copy_user(to, from, n, __copyuser_seg, "", "");
32942 #else
32943- __copy_user(to, from, n);
32944+ __copy_user(to, from, n, __copyuser_seg, "", "");
32945 #endif
32946 clac();
32947 return n;
32948 }
32949 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
32950
32951-/**
32952- * copy_to_user: - Copy a block of data into user space.
32953- * @to: Destination address, in user space.
32954- * @from: Source address, in kernel space.
32955- * @n: Number of bytes to copy.
32956- *
32957- * Context: User context only. This function may sleep.
32958- *
32959- * Copy data from kernel space to user space.
32960- *
32961- * Returns number of bytes that could not be copied.
32962- * On success, this will be zero.
32963- */
32964-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
32965+#ifdef CONFIG_PAX_MEMORY_UDEREF
32966+void __set_fs(mm_segment_t x)
32967 {
32968- if (access_ok(VERIFY_WRITE, to, n))
32969- n = __copy_to_user(to, from, n);
32970- return n;
32971+ switch (x.seg) {
32972+ case 0:
32973+ loadsegment(gs, 0);
32974+ break;
32975+ case TASK_SIZE_MAX:
32976+ loadsegment(gs, __USER_DS);
32977+ break;
32978+ case -1UL:
32979+ loadsegment(gs, __KERNEL_DS);
32980+ break;
32981+ default:
32982+ BUG();
32983+ }
32984 }
32985-EXPORT_SYMBOL(_copy_to_user);
32986+EXPORT_SYMBOL(__set_fs);
32987
32988-/**
32989- * copy_from_user: - Copy a block of data from user space.
32990- * @to: Destination address, in kernel space.
32991- * @from: Source address, in user space.
32992- * @n: Number of bytes to copy.
32993- *
32994- * Context: User context only. This function may sleep.
32995- *
32996- * Copy data from user space to kernel space.
32997- *
32998- * Returns number of bytes that could not be copied.
32999- * On success, this will be zero.
33000- *
33001- * If some data could not be copied, this function will pad the copied
33002- * data to the requested size using zero bytes.
33003- */
33004-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
33005+void set_fs(mm_segment_t x)
33006 {
33007- if (access_ok(VERIFY_READ, from, n))
33008- n = __copy_from_user(to, from, n);
33009- else
33010- memset(to, 0, n);
33011- return n;
33012+ current_thread_info()->addr_limit = x;
33013+ __set_fs(x);
33014 }
33015-EXPORT_SYMBOL(_copy_from_user);
33016+EXPORT_SYMBOL(set_fs);
33017+#endif
33018diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
33019index c905e89..01ab928 100644
33020--- a/arch/x86/lib/usercopy_64.c
33021+++ b/arch/x86/lib/usercopy_64.c
33022@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
33023 might_fault();
33024 /* no memory constraint because it doesn't change any memory gcc knows
33025 about */
33026+ pax_open_userland();
33027 stac();
33028 asm volatile(
33029 " testq %[size8],%[size8]\n"
33030@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
33031 _ASM_EXTABLE(0b,3b)
33032 _ASM_EXTABLE(1b,2b)
33033 : [size8] "=&c"(size), [dst] "=&D" (__d0)
33034- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
33035+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
33036 [zero] "r" (0UL), [eight] "r" (8UL));
33037 clac();
33038+ pax_close_userland();
33039 return size;
33040 }
33041 EXPORT_SYMBOL(__clear_user);
33042@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
33043 }
33044 EXPORT_SYMBOL(clear_user);
33045
33046-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
33047+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
33048 {
33049- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
33050- return copy_user_generic((__force void *)to, (__force void *)from, len);
33051- }
33052- return len;
33053+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
33054+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
33055+ return len;
33056 }
33057 EXPORT_SYMBOL(copy_in_user);
33058
33059@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
33060 * it is not necessary to optimize tail handling.
33061 */
33062 __visible unsigned long
33063-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
33064+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
33065 {
33066 char c;
33067 unsigned zero_len;
33068
33069+ clac();
33070+ pax_close_userland();
33071 for (; len; --len, to++) {
33072 if (__get_user_nocheck(c, from++, sizeof(char)))
33073 break;
33074@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
33075 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
33076 if (__put_user_nocheck(c, to++, sizeof(char)))
33077 break;
33078- clac();
33079 return len;
33080 }
33081diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
33082index 6a19ad9..1c48f9a 100644
33083--- a/arch/x86/mm/Makefile
33084+++ b/arch/x86/mm/Makefile
33085@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
33086 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
33087
33088 obj-$(CONFIG_MEMTEST) += memtest.o
33089+
33090+quote:="
33091+obj-$(CONFIG_X86_64) += uderef_64.o
33092+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
33093diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
33094index 903ec1e..c4166b2 100644
33095--- a/arch/x86/mm/extable.c
33096+++ b/arch/x86/mm/extable.c
33097@@ -6,12 +6,24 @@
33098 static inline unsigned long
33099 ex_insn_addr(const struct exception_table_entry *x)
33100 {
33101- return (unsigned long)&x->insn + x->insn;
33102+ unsigned long reloc = 0;
33103+
33104+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33105+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33106+#endif
33107+
33108+ return (unsigned long)&x->insn + x->insn + reloc;
33109 }
33110 static inline unsigned long
33111 ex_fixup_addr(const struct exception_table_entry *x)
33112 {
33113- return (unsigned long)&x->fixup + x->fixup;
33114+ unsigned long reloc = 0;
33115+
33116+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33117+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33118+#endif
33119+
33120+ return (unsigned long)&x->fixup + x->fixup + reloc;
33121 }
33122
33123 int fixup_exception(struct pt_regs *regs)
33124@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
33125 unsigned long new_ip;
33126
33127 #ifdef CONFIG_PNPBIOS
33128- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
33129+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
33130 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
33131 extern u32 pnp_bios_is_utter_crap;
33132 pnp_bios_is_utter_crap = 1;
33133@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
33134 i += 4;
33135 p->fixup -= i;
33136 i += 4;
33137+
33138+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33139+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
33140+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33141+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33142+#endif
33143+
33144 }
33145 }
33146
33147diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
33148index a241946..d7a04cf 100644
33149--- a/arch/x86/mm/fault.c
33150+++ b/arch/x86/mm/fault.c
33151@@ -14,12 +14,19 @@
33152 #include <linux/hugetlb.h> /* hstate_index_to_shift */
33153 #include <linux/prefetch.h> /* prefetchw */
33154 #include <linux/context_tracking.h> /* exception_enter(), ... */
33155+#include <linux/unistd.h>
33156+#include <linux/compiler.h>
33157
33158 #include <asm/traps.h> /* dotraplinkage, ... */
33159 #include <asm/pgalloc.h> /* pgd_*(), ... */
33160 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
33161 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
33162 #include <asm/vsyscall.h> /* emulate_vsyscall */
33163+#include <asm/tlbflush.h>
33164+
33165+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33166+#include <asm/stacktrace.h>
33167+#endif
33168
33169 #define CREATE_TRACE_POINTS
33170 #include <asm/trace/exceptions.h>
33171@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
33172 int ret = 0;
33173
33174 /* kprobe_running() needs smp_processor_id() */
33175- if (kprobes_built_in() && !user_mode_vm(regs)) {
33176+ if (kprobes_built_in() && !user_mode(regs)) {
33177 preempt_disable();
33178 if (kprobe_running() && kprobe_fault_handler(regs, 14))
33179 ret = 1;
33180@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
33181 return !instr_lo || (instr_lo>>1) == 1;
33182 case 0x00:
33183 /* Prefetch instruction is 0x0F0D or 0x0F18 */
33184- if (probe_kernel_address(instr, opcode))
33185+ if (user_mode(regs)) {
33186+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
33187+ return 0;
33188+ } else if (probe_kernel_address(instr, opcode))
33189 return 0;
33190
33191 *prefetch = (instr_lo == 0xF) &&
33192@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
33193 while (instr < max_instr) {
33194 unsigned char opcode;
33195
33196- if (probe_kernel_address(instr, opcode))
33197+ if (user_mode(regs)) {
33198+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
33199+ break;
33200+ } else if (probe_kernel_address(instr, opcode))
33201 break;
33202
33203 instr++;
33204@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
33205 force_sig_info(si_signo, &info, tsk);
33206 }
33207
33208+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33209+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
33210+#endif
33211+
33212+#ifdef CONFIG_PAX_EMUTRAMP
33213+static int pax_handle_fetch_fault(struct pt_regs *regs);
33214+#endif
33215+
33216+#ifdef CONFIG_PAX_PAGEEXEC
33217+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
33218+{
33219+ pgd_t *pgd;
33220+ pud_t *pud;
33221+ pmd_t *pmd;
33222+
33223+ pgd = pgd_offset(mm, address);
33224+ if (!pgd_present(*pgd))
33225+ return NULL;
33226+ pud = pud_offset(pgd, address);
33227+ if (!pud_present(*pud))
33228+ return NULL;
33229+ pmd = pmd_offset(pud, address);
33230+ if (!pmd_present(*pmd))
33231+ return NULL;
33232+ return pmd;
33233+}
33234+#endif
33235+
33236 DEFINE_SPINLOCK(pgd_lock);
33237 LIST_HEAD(pgd_list);
33238
33239@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
33240 for (address = VMALLOC_START & PMD_MASK;
33241 address >= TASK_SIZE && address < FIXADDR_TOP;
33242 address += PMD_SIZE) {
33243+
33244+#ifdef CONFIG_PAX_PER_CPU_PGD
33245+ unsigned long cpu;
33246+#else
33247 struct page *page;
33248+#endif
33249
33250 spin_lock(&pgd_lock);
33251+
33252+#ifdef CONFIG_PAX_PER_CPU_PGD
33253+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33254+ pgd_t *pgd = get_cpu_pgd(cpu, user);
33255+ pmd_t *ret;
33256+
33257+ ret = vmalloc_sync_one(pgd, address);
33258+ if (!ret)
33259+ break;
33260+ pgd = get_cpu_pgd(cpu, kernel);
33261+#else
33262 list_for_each_entry(page, &pgd_list, lru) {
33263+ pgd_t *pgd;
33264 spinlock_t *pgt_lock;
33265 pmd_t *ret;
33266
33267@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
33268 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33269
33270 spin_lock(pgt_lock);
33271- ret = vmalloc_sync_one(page_address(page), address);
33272+ pgd = page_address(page);
33273+#endif
33274+
33275+ ret = vmalloc_sync_one(pgd, address);
33276+
33277+#ifndef CONFIG_PAX_PER_CPU_PGD
33278 spin_unlock(pgt_lock);
33279+#endif
33280
33281 if (!ret)
33282 break;
33283@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
33284 * an interrupt in the middle of a task switch..
33285 */
33286 pgd_paddr = read_cr3();
33287+
33288+#ifdef CONFIG_PAX_PER_CPU_PGD
33289+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
33290+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
33291+#endif
33292+
33293 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
33294 if (!pmd_k)
33295 return -1;
33296@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
33297 * happen within a race in page table update. In the later
33298 * case just flush:
33299 */
33300- pgd = pgd_offset(current->active_mm, address);
33301+
33302 pgd_ref = pgd_offset_k(address);
33303 if (pgd_none(*pgd_ref))
33304 return -1;
33305
33306+#ifdef CONFIG_PAX_PER_CPU_PGD
33307+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
33308+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
33309+ if (pgd_none(*pgd)) {
33310+ set_pgd(pgd, *pgd_ref);
33311+ arch_flush_lazy_mmu_mode();
33312+ } else {
33313+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
33314+ }
33315+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
33316+#else
33317+ pgd = pgd_offset(current->active_mm, address);
33318+#endif
33319+
33320 if (pgd_none(*pgd)) {
33321 set_pgd(pgd, *pgd_ref);
33322 arch_flush_lazy_mmu_mode();
33323@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
33324 static int is_errata100(struct pt_regs *regs, unsigned long address)
33325 {
33326 #ifdef CONFIG_X86_64
33327- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
33328+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
33329 return 1;
33330 #endif
33331 return 0;
33332@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
33333 }
33334
33335 static const char nx_warning[] = KERN_CRIT
33336-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
33337+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
33338 static const char smep_warning[] = KERN_CRIT
33339-"unable to execute userspace code (SMEP?) (uid: %d)\n";
33340+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
33341
33342 static void
33343 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33344@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33345 if (!oops_may_print())
33346 return;
33347
33348- if (error_code & PF_INSTR) {
33349+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
33350 unsigned int level;
33351 pgd_t *pgd;
33352 pte_t *pte;
33353@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33354 pte = lookup_address_in_pgd(pgd, address, &level);
33355
33356 if (pte && pte_present(*pte) && !pte_exec(*pte))
33357- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
33358+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
33359 if (pte && pte_present(*pte) && pte_exec(*pte) &&
33360 (pgd_flags(*pgd) & _PAGE_USER) &&
33361 (read_cr4() & X86_CR4_SMEP))
33362- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
33363+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
33364 }
33365
33366+#ifdef CONFIG_PAX_KERNEXEC
33367+ if (init_mm.start_code <= address && address < init_mm.end_code) {
33368+ if (current->signal->curr_ip)
33369+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
33370+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
33371+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
33372+ else
33373+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
33374+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
33375+ }
33376+#endif
33377+
33378 printk(KERN_ALERT "BUG: unable to handle kernel ");
33379 if (address < PAGE_SIZE)
33380 printk(KERN_CONT "NULL pointer dereference");
33381@@ -785,6 +881,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
33382 return;
33383 }
33384 #endif
33385+
33386+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33387+ if (pax_is_fetch_fault(regs, error_code, address)) {
33388+
33389+#ifdef CONFIG_PAX_EMUTRAMP
33390+ switch (pax_handle_fetch_fault(regs)) {
33391+ case 2:
33392+ return;
33393+ }
33394+#endif
33395+
33396+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
33397+ do_group_exit(SIGKILL);
33398+ }
33399+#endif
33400+
33401 /* Kernel addresses are always protection faults: */
33402 if (address >= TASK_SIZE)
33403 error_code |= PF_PROT;
33404@@ -870,7 +982,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
33405 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
33406 printk(KERN_ERR
33407 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
33408- tsk->comm, tsk->pid, address);
33409+ tsk->comm, task_pid_nr(tsk), address);
33410 code = BUS_MCEERR_AR;
33411 }
33412 #endif
33413@@ -924,6 +1036,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
33414 return 1;
33415 }
33416
33417+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
33418+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
33419+{
33420+ pte_t *pte;
33421+ pmd_t *pmd;
33422+ spinlock_t *ptl;
33423+ unsigned char pte_mask;
33424+
33425+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
33426+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
33427+ return 0;
33428+
33429+ /* PaX: it's our fault, let's handle it if we can */
33430+
33431+ /* PaX: take a look at read faults before acquiring any locks */
33432+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
33433+ /* instruction fetch attempt from a protected page in user mode */
33434+ up_read(&mm->mmap_sem);
33435+
33436+#ifdef CONFIG_PAX_EMUTRAMP
33437+ switch (pax_handle_fetch_fault(regs)) {
33438+ case 2:
33439+ return 1;
33440+ }
33441+#endif
33442+
33443+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
33444+ do_group_exit(SIGKILL);
33445+ }
33446+
33447+ pmd = pax_get_pmd(mm, address);
33448+ if (unlikely(!pmd))
33449+ return 0;
33450+
33451+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
33452+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
33453+ pte_unmap_unlock(pte, ptl);
33454+ return 0;
33455+ }
33456+
33457+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
33458+ /* write attempt to a protected page in user mode */
33459+ pte_unmap_unlock(pte, ptl);
33460+ return 0;
33461+ }
33462+
33463+#ifdef CONFIG_SMP
33464+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
33465+#else
33466+ if (likely(address > get_limit(regs->cs)))
33467+#endif
33468+ {
33469+ set_pte(pte, pte_mkread(*pte));
33470+ __flush_tlb_one(address);
33471+ pte_unmap_unlock(pte, ptl);
33472+ up_read(&mm->mmap_sem);
33473+ return 1;
33474+ }
33475+
33476+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
33477+
33478+ /*
33479+ * PaX: fill DTLB with user rights and retry
33480+ */
33481+ __asm__ __volatile__ (
33482+ "orb %2,(%1)\n"
33483+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
33484+/*
33485+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
33486+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
33487+ * page fault when examined during a TLB load attempt. this is true not only
33488+ * for PTEs holding a non-present entry but also present entries that will
33489+ * raise a page fault (such as those set up by PaX, or the copy-on-write
33490+ * mechanism). in effect it means that we do *not* need to flush the TLBs
33491+ * for our target pages since their PTEs are simply not in the TLBs at all.
33492+
33493+ * the best thing in omitting it is that we gain around 15-20% speed in the
33494+ * fast path of the page fault handler and can get rid of tracing since we
33495+ * can no longer flush unintended entries.
33496+ */
33497+ "invlpg (%0)\n"
33498+#endif
33499+ __copyuser_seg"testb $0,(%0)\n"
33500+ "xorb %3,(%1)\n"
33501+ :
33502+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
33503+ : "memory", "cc");
33504+ pte_unmap_unlock(pte, ptl);
33505+ up_read(&mm->mmap_sem);
33506+ return 1;
33507+}
33508+#endif
33509+
33510 /*
33511 * Handle a spurious fault caused by a stale TLB entry.
33512 *
33513@@ -991,6 +1196,9 @@ int show_unhandled_signals = 1;
33514 static inline int
33515 access_error(unsigned long error_code, struct vm_area_struct *vma)
33516 {
33517+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
33518+ return 1;
33519+
33520 if (error_code & PF_WRITE) {
33521 /* write, present and write, not present: */
33522 if (unlikely(!(vma->vm_flags & VM_WRITE)))
33523@@ -1025,7 +1233,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
33524 if (error_code & PF_USER)
33525 return false;
33526
33527- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
33528+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
33529 return false;
33530
33531 return true;
33532@@ -1053,6 +1261,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
33533 tsk = current;
33534 mm = tsk->mm;
33535
33536+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33537+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
33538+ if (!search_exception_tables(regs->ip)) {
33539+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
33540+ bad_area_nosemaphore(regs, error_code, address);
33541+ return;
33542+ }
33543+ if (address < pax_user_shadow_base) {
33544+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
33545+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
33546+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
33547+ } else
33548+ address -= pax_user_shadow_base;
33549+ }
33550+#endif
33551+
33552 /*
33553 * Detect and handle instructions that would cause a page fault for
33554 * both a tracked kernel page and a userspace page.
33555@@ -1130,7 +1354,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
33556 * User-mode registers count as a user access even for any
33557 * potential system fault or CPU buglet:
33558 */
33559- if (user_mode_vm(regs)) {
33560+ if (user_mode(regs)) {
33561 local_irq_enable();
33562 error_code |= PF_USER;
33563 flags |= FAULT_FLAG_USER;
33564@@ -1177,6 +1401,11 @@ retry:
33565 might_sleep();
33566 }
33567
33568+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
33569+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
33570+ return;
33571+#endif
33572+
33573 vma = find_vma(mm, address);
33574 if (unlikely(!vma)) {
33575 bad_area(regs, error_code, address);
33576@@ -1188,18 +1417,24 @@ retry:
33577 bad_area(regs, error_code, address);
33578 return;
33579 }
33580- if (error_code & PF_USER) {
33581- /*
33582- * Accessing the stack below %sp is always a bug.
33583- * The large cushion allows instructions like enter
33584- * and pusha to work. ("enter $65535, $31" pushes
33585- * 32 pointers and then decrements %sp by 65535.)
33586- */
33587- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
33588- bad_area(regs, error_code, address);
33589- return;
33590- }
33591+ /*
33592+ * Accessing the stack below %sp is always a bug.
33593+ * The large cushion allows instructions like enter
33594+ * and pusha to work. ("enter $65535, $31" pushes
33595+ * 32 pointers and then decrements %sp by 65535.)
33596+ */
33597+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
33598+ bad_area(regs, error_code, address);
33599+ return;
33600 }
33601+
33602+#ifdef CONFIG_PAX_SEGMEXEC
33603+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
33604+ bad_area(regs, error_code, address);
33605+ return;
33606+ }
33607+#endif
33608+
33609 if (unlikely(expand_stack(vma, address))) {
33610 bad_area(regs, error_code, address);
33611 return;
33612@@ -1316,3 +1551,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
33613 }
33614 NOKPROBE_SYMBOL(trace_do_page_fault);
33615 #endif /* CONFIG_TRACING */
33616+
33617+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33618+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
33619+{
33620+ struct mm_struct *mm = current->mm;
33621+ unsigned long ip = regs->ip;
33622+
33623+ if (v8086_mode(regs))
33624+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
33625+
33626+#ifdef CONFIG_PAX_PAGEEXEC
33627+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
33628+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
33629+ return true;
33630+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
33631+ return true;
33632+ return false;
33633+ }
33634+#endif
33635+
33636+#ifdef CONFIG_PAX_SEGMEXEC
33637+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
33638+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
33639+ return true;
33640+ return false;
33641+ }
33642+#endif
33643+
33644+ return false;
33645+}
33646+#endif
33647+
33648+#ifdef CONFIG_PAX_EMUTRAMP
33649+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
33650+{
33651+ int err;
33652+
33653+ do { /* PaX: libffi trampoline emulation */
33654+ unsigned char mov, jmp;
33655+ unsigned int addr1, addr2;
33656+
33657+#ifdef CONFIG_X86_64
33658+ if ((regs->ip + 9) >> 32)
33659+ break;
33660+#endif
33661+
33662+ err = get_user(mov, (unsigned char __user *)regs->ip);
33663+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33664+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
33665+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33666+
33667+ if (err)
33668+ break;
33669+
33670+ if (mov == 0xB8 && jmp == 0xE9) {
33671+ regs->ax = addr1;
33672+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
33673+ return 2;
33674+ }
33675+ } while (0);
33676+
33677+ do { /* PaX: gcc trampoline emulation #1 */
33678+ unsigned char mov1, mov2;
33679+ unsigned short jmp;
33680+ unsigned int addr1, addr2;
33681+
33682+#ifdef CONFIG_X86_64
33683+ if ((regs->ip + 11) >> 32)
33684+ break;
33685+#endif
33686+
33687+ err = get_user(mov1, (unsigned char __user *)regs->ip);
33688+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33689+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
33690+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33691+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
33692+
33693+ if (err)
33694+ break;
33695+
33696+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
33697+ regs->cx = addr1;
33698+ regs->ax = addr2;
33699+ regs->ip = addr2;
33700+ return 2;
33701+ }
33702+ } while (0);
33703+
33704+ do { /* PaX: gcc trampoline emulation #2 */
33705+ unsigned char mov, jmp;
33706+ unsigned int addr1, addr2;
33707+
33708+#ifdef CONFIG_X86_64
33709+ if ((regs->ip + 9) >> 32)
33710+ break;
33711+#endif
33712+
33713+ err = get_user(mov, (unsigned char __user *)regs->ip);
33714+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33715+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
33716+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33717+
33718+ if (err)
33719+ break;
33720+
33721+ if (mov == 0xB9 && jmp == 0xE9) {
33722+ regs->cx = addr1;
33723+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
33724+ return 2;
33725+ }
33726+ } while (0);
33727+
33728+ return 1; /* PaX in action */
33729+}
33730+
33731+#ifdef CONFIG_X86_64
33732+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
33733+{
33734+ int err;
33735+
33736+ do { /* PaX: libffi trampoline emulation */
33737+ unsigned short mov1, mov2, jmp1;
33738+ unsigned char stcclc, jmp2;
33739+ unsigned long addr1, addr2;
33740+
33741+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33742+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33743+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33744+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33745+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
33746+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
33747+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
33748+
33749+ if (err)
33750+ break;
33751+
33752+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33753+ regs->r11 = addr1;
33754+ regs->r10 = addr2;
33755+ if (stcclc == 0xF8)
33756+ regs->flags &= ~X86_EFLAGS_CF;
33757+ else
33758+ regs->flags |= X86_EFLAGS_CF;
33759+ regs->ip = addr1;
33760+ return 2;
33761+ }
33762+ } while (0);
33763+
33764+ do { /* PaX: gcc trampoline emulation #1 */
33765+ unsigned short mov1, mov2, jmp1;
33766+ unsigned char jmp2;
33767+ unsigned int addr1;
33768+ unsigned long addr2;
33769+
33770+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33771+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
33772+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
33773+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
33774+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
33775+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
33776+
33777+ if (err)
33778+ break;
33779+
33780+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33781+ regs->r11 = addr1;
33782+ regs->r10 = addr2;
33783+ regs->ip = addr1;
33784+ return 2;
33785+ }
33786+ } while (0);
33787+
33788+ do { /* PaX: gcc trampoline emulation #2 */
33789+ unsigned short mov1, mov2, jmp1;
33790+ unsigned char jmp2;
33791+ unsigned long addr1, addr2;
33792+
33793+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33794+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33795+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33796+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33797+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
33798+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
33799+
33800+ if (err)
33801+ break;
33802+
33803+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33804+ regs->r11 = addr1;
33805+ regs->r10 = addr2;
33806+ regs->ip = addr1;
33807+ return 2;
33808+ }
33809+ } while (0);
33810+
33811+ return 1; /* PaX in action */
33812+}
33813+#endif
33814+
33815+/*
33816+ * PaX: decide what to do with offenders (regs->ip = fault address)
33817+ *
33818+ * returns 1 when task should be killed
33819+ * 2 when gcc trampoline was detected
33820+ */
33821+static int pax_handle_fetch_fault(struct pt_regs *regs)
33822+{
33823+ if (v8086_mode(regs))
33824+ return 1;
33825+
33826+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
33827+ return 1;
33828+
33829+#ifdef CONFIG_X86_32
33830+ return pax_handle_fetch_fault_32(regs);
33831+#else
33832+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
33833+ return pax_handle_fetch_fault_32(regs);
33834+ else
33835+ return pax_handle_fetch_fault_64(regs);
33836+#endif
33837+}
33838+#endif
33839+
33840+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33841+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
33842+{
33843+ long i;
33844+
33845+ printk(KERN_ERR "PAX: bytes at PC: ");
33846+ for (i = 0; i < 20; i++) {
33847+ unsigned char c;
33848+ if (get_user(c, (unsigned char __force_user *)pc+i))
33849+ printk(KERN_CONT "?? ");
33850+ else
33851+ printk(KERN_CONT "%02x ", c);
33852+ }
33853+ printk("\n");
33854+
33855+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
33856+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
33857+ unsigned long c;
33858+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
33859+#ifdef CONFIG_X86_32
33860+ printk(KERN_CONT "???????? ");
33861+#else
33862+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
33863+ printk(KERN_CONT "???????? ???????? ");
33864+ else
33865+ printk(KERN_CONT "???????????????? ");
33866+#endif
33867+ } else {
33868+#ifdef CONFIG_X86_64
33869+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
33870+ printk(KERN_CONT "%08x ", (unsigned int)c);
33871+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
33872+ } else
33873+#endif
33874+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
33875+ }
33876+ }
33877+ printk("\n");
33878+}
33879+#endif
33880+
33881+/**
33882+ * probe_kernel_write(): safely attempt to write to a location
33883+ * @dst: address to write to
33884+ * @src: pointer to the data that shall be written
33885+ * @size: size of the data chunk
33886+ *
33887+ * Safely write to address @dst from the buffer at @src. If a kernel fault
33888+ * happens, handle that and return -EFAULT.
33889+ */
33890+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
33891+{
33892+ long ret;
33893+ mm_segment_t old_fs = get_fs();
33894+
33895+ set_fs(KERNEL_DS);
33896+ pagefault_disable();
33897+ pax_open_kernel();
33898+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
33899+ pax_close_kernel();
33900+ pagefault_enable();
33901+ set_fs(old_fs);
33902+
33903+ return ret ? -EFAULT : 0;
33904+}
33905diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
33906index 207d9aef..69030980 100644
33907--- a/arch/x86/mm/gup.c
33908+++ b/arch/x86/mm/gup.c
33909@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
33910 addr = start;
33911 len = (unsigned long) nr_pages << PAGE_SHIFT;
33912 end = start + len;
33913- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
33914+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33915 (void __user *)start, len)))
33916 return 0;
33917
33918@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
33919 goto slow_irqon;
33920 #endif
33921
33922+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33923+ (void __user *)start, len)))
33924+ return 0;
33925+
33926 /*
33927 * XXX: batch / limit 'nr', to avoid large irq off latency
33928 * needs some instrumenting to determine the common sizes used by
33929diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
33930index 4500142..53a363c 100644
33931--- a/arch/x86/mm/highmem_32.c
33932+++ b/arch/x86/mm/highmem_32.c
33933@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
33934 idx = type + KM_TYPE_NR*smp_processor_id();
33935 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33936 BUG_ON(!pte_none(*(kmap_pte-idx)));
33937+
33938+ pax_open_kernel();
33939 set_pte(kmap_pte-idx, mk_pte(page, prot));
33940+ pax_close_kernel();
33941+
33942 arch_flush_lazy_mmu_mode();
33943
33944 return (void *)vaddr;
33945diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
33946index 8b977eb..4732c33 100644
33947--- a/arch/x86/mm/hugetlbpage.c
33948+++ b/arch/x86/mm/hugetlbpage.c
33949@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
33950 #ifdef CONFIG_HUGETLB_PAGE
33951 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
33952 unsigned long addr, unsigned long len,
33953- unsigned long pgoff, unsigned long flags)
33954+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33955 {
33956 struct hstate *h = hstate_file(file);
33957 struct vm_unmapped_area_info info;
33958-
33959+
33960 info.flags = 0;
33961 info.length = len;
33962 info.low_limit = current->mm->mmap_legacy_base;
33963 info.high_limit = TASK_SIZE;
33964 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33965 info.align_offset = 0;
33966+ info.threadstack_offset = offset;
33967 return vm_unmapped_area(&info);
33968 }
33969
33970 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33971 unsigned long addr0, unsigned long len,
33972- unsigned long pgoff, unsigned long flags)
33973+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33974 {
33975 struct hstate *h = hstate_file(file);
33976 struct vm_unmapped_area_info info;
33977@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33978 info.high_limit = current->mm->mmap_base;
33979 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33980 info.align_offset = 0;
33981+ info.threadstack_offset = offset;
33982 addr = vm_unmapped_area(&info);
33983
33984 /*
33985@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33986 VM_BUG_ON(addr != -ENOMEM);
33987 info.flags = 0;
33988 info.low_limit = TASK_UNMAPPED_BASE;
33989+
33990+#ifdef CONFIG_PAX_RANDMMAP
33991+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
33992+ info.low_limit += current->mm->delta_mmap;
33993+#endif
33994+
33995 info.high_limit = TASK_SIZE;
33996 addr = vm_unmapped_area(&info);
33997 }
33998@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
33999 struct hstate *h = hstate_file(file);
34000 struct mm_struct *mm = current->mm;
34001 struct vm_area_struct *vma;
34002+ unsigned long pax_task_size = TASK_SIZE;
34003+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
34004
34005 if (len & ~huge_page_mask(h))
34006 return -EINVAL;
34007- if (len > TASK_SIZE)
34008+
34009+#ifdef CONFIG_PAX_SEGMEXEC
34010+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34011+ pax_task_size = SEGMEXEC_TASK_SIZE;
34012+#endif
34013+
34014+ pax_task_size -= PAGE_SIZE;
34015+
34016+ if (len > pax_task_size)
34017 return -ENOMEM;
34018
34019 if (flags & MAP_FIXED) {
34020@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
34021 return addr;
34022 }
34023
34024+#ifdef CONFIG_PAX_RANDMMAP
34025+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34026+#endif
34027+
34028 if (addr) {
34029 addr = ALIGN(addr, huge_page_size(h));
34030 vma = find_vma(mm, addr);
34031- if (TASK_SIZE - len >= addr &&
34032- (!vma || addr + len <= vma->vm_start))
34033+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
34034 return addr;
34035 }
34036 if (mm->get_unmapped_area == arch_get_unmapped_area)
34037 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
34038- pgoff, flags);
34039+ pgoff, flags, offset);
34040 else
34041 return hugetlb_get_unmapped_area_topdown(file, addr, len,
34042- pgoff, flags);
34043+ pgoff, flags, offset);
34044 }
34045 #endif /* CONFIG_HUGETLB_PAGE */
34046
34047diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
34048index 66dba36..f8082ec 100644
34049--- a/arch/x86/mm/init.c
34050+++ b/arch/x86/mm/init.c
34051@@ -4,6 +4,7 @@
34052 #include <linux/swap.h>
34053 #include <linux/memblock.h>
34054 #include <linux/bootmem.h> /* for max_low_pfn */
34055+#include <linux/tboot.h>
34056
34057 #include <asm/cacheflush.h>
34058 #include <asm/e820.h>
34059@@ -17,6 +18,8 @@
34060 #include <asm/proto.h>
34061 #include <asm/dma.h> /* for MAX_DMA_PFN */
34062 #include <asm/microcode.h>
34063+#include <asm/desc.h>
34064+#include <asm/bios_ebda.h>
34065
34066 /*
34067 * We need to define the tracepoints somewhere, and tlb.c
34068@@ -570,7 +573,18 @@ void __init init_mem_mapping(void)
34069 early_ioremap_page_table_range_init();
34070 #endif
34071
34072+#ifdef CONFIG_PAX_PER_CPU_PGD
34073+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
34074+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
34075+ KERNEL_PGD_PTRS);
34076+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
34077+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
34078+ KERNEL_PGD_PTRS);
34079+ load_cr3(get_cpu_pgd(0, kernel));
34080+#else
34081 load_cr3(swapper_pg_dir);
34082+#endif
34083+
34084 __flush_tlb_all();
34085
34086 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
34087@@ -586,10 +600,40 @@ void __init init_mem_mapping(void)
34088 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
34089 * mmio resources as well as potential bios/acpi data regions.
34090 */
34091+
34092+#ifdef CONFIG_GRKERNSEC_KMEM
34093+static unsigned int ebda_start __read_only;
34094+static unsigned int ebda_end __read_only;
34095+#endif
34096+
34097 int devmem_is_allowed(unsigned long pagenr)
34098 {
34099- if (pagenr < 256)
34100+#ifdef CONFIG_GRKERNSEC_KMEM
34101+ /* allow BDA */
34102+ if (!pagenr)
34103 return 1;
34104+ /* allow EBDA */
34105+ if (pagenr >= ebda_start && pagenr < ebda_end)
34106+ return 1;
34107+ /* if tboot is in use, allow access to its hardcoded serial log range */
34108+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
34109+ return 1;
34110+#else
34111+ if (!pagenr)
34112+ return 1;
34113+#ifdef CONFIG_VM86
34114+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
34115+ return 1;
34116+#endif
34117+#endif
34118+
34119+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
34120+ return 1;
34121+#ifdef CONFIG_GRKERNSEC_KMEM
34122+ /* throw out everything else below 1MB */
34123+ if (pagenr <= 256)
34124+ return 0;
34125+#endif
34126 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
34127 return 0;
34128 if (!page_is_ram(pagenr))
34129@@ -635,8 +679,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
34130 #endif
34131 }
34132
34133+#ifdef CONFIG_GRKERNSEC_KMEM
34134+static inline void gr_init_ebda(void)
34135+{
34136+ unsigned int ebda_addr;
34137+ unsigned int ebda_size = 0;
34138+
34139+ ebda_addr = get_bios_ebda();
34140+ if (ebda_addr) {
34141+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
34142+ ebda_size <<= 10;
34143+ }
34144+ if (ebda_addr && ebda_size) {
34145+ ebda_start = ebda_addr >> PAGE_SHIFT;
34146+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
34147+ } else {
34148+ ebda_start = 0x9f000 >> PAGE_SHIFT;
34149+ ebda_end = 0xa0000 >> PAGE_SHIFT;
34150+ }
34151+}
34152+#else
34153+static inline void gr_init_ebda(void) { }
34154+#endif
34155+
34156 void free_initmem(void)
34157 {
34158+#ifdef CONFIG_PAX_KERNEXEC
34159+#ifdef CONFIG_X86_32
34160+ /* PaX: limit KERNEL_CS to actual size */
34161+ unsigned long addr, limit;
34162+ struct desc_struct d;
34163+ int cpu;
34164+#else
34165+ pgd_t *pgd;
34166+ pud_t *pud;
34167+ pmd_t *pmd;
34168+ unsigned long addr, end;
34169+#endif
34170+#endif
34171+
34172+ gr_init_ebda();
34173+
34174+#ifdef CONFIG_PAX_KERNEXEC
34175+#ifdef CONFIG_X86_32
34176+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
34177+ limit = (limit - 1UL) >> PAGE_SHIFT;
34178+
34179+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
34180+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34181+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
34182+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
34183+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
34184+ }
34185+
34186+ /* PaX: make KERNEL_CS read-only */
34187+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
34188+ if (!paravirt_enabled())
34189+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
34190+/*
34191+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
34192+ pgd = pgd_offset_k(addr);
34193+ pud = pud_offset(pgd, addr);
34194+ pmd = pmd_offset(pud, addr);
34195+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34196+ }
34197+*/
34198+#ifdef CONFIG_X86_PAE
34199+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
34200+/*
34201+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
34202+ pgd = pgd_offset_k(addr);
34203+ pud = pud_offset(pgd, addr);
34204+ pmd = pmd_offset(pud, addr);
34205+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
34206+ }
34207+*/
34208+#endif
34209+
34210+#ifdef CONFIG_MODULES
34211+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
34212+#endif
34213+
34214+#else
34215+ /* PaX: make kernel code/rodata read-only, rest non-executable */
34216+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
34217+ pgd = pgd_offset_k(addr);
34218+ pud = pud_offset(pgd, addr);
34219+ pmd = pmd_offset(pud, addr);
34220+ if (!pmd_present(*pmd))
34221+ continue;
34222+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
34223+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34224+ else
34225+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
34226+ }
34227+
34228+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
34229+ end = addr + KERNEL_IMAGE_SIZE;
34230+ for (; addr < end; addr += PMD_SIZE) {
34231+ pgd = pgd_offset_k(addr);
34232+ pud = pud_offset(pgd, addr);
34233+ pmd = pmd_offset(pud, addr);
34234+ if (!pmd_present(*pmd))
34235+ continue;
34236+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
34237+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34238+ }
34239+#endif
34240+
34241+ flush_tlb_all();
34242+#endif
34243+
34244 free_init_pages("unused kernel",
34245 (unsigned long)(&__init_begin),
34246 (unsigned long)(&__init_end));
34247diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
34248index 7d05565..bfc5338 100644
34249--- a/arch/x86/mm/init_32.c
34250+++ b/arch/x86/mm/init_32.c
34251@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
34252 bool __read_mostly __vmalloc_start_set = false;
34253
34254 /*
34255- * Creates a middle page table and puts a pointer to it in the
34256- * given global directory entry. This only returns the gd entry
34257- * in non-PAE compilation mode, since the middle layer is folded.
34258- */
34259-static pmd_t * __init one_md_table_init(pgd_t *pgd)
34260-{
34261- pud_t *pud;
34262- pmd_t *pmd_table;
34263-
34264-#ifdef CONFIG_X86_PAE
34265- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
34266- pmd_table = (pmd_t *)alloc_low_page();
34267- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
34268- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
34269- pud = pud_offset(pgd, 0);
34270- BUG_ON(pmd_table != pmd_offset(pud, 0));
34271-
34272- return pmd_table;
34273- }
34274-#endif
34275- pud = pud_offset(pgd, 0);
34276- pmd_table = pmd_offset(pud, 0);
34277-
34278- return pmd_table;
34279-}
34280-
34281-/*
34282 * Create a page table and place a pointer to it in a middle page
34283 * directory entry:
34284 */
34285@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
34286 pte_t *page_table = (pte_t *)alloc_low_page();
34287
34288 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
34289+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34290+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
34291+#else
34292 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
34293+#endif
34294 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
34295 }
34296
34297 return pte_offset_kernel(pmd, 0);
34298 }
34299
34300+static pmd_t * __init one_md_table_init(pgd_t *pgd)
34301+{
34302+ pud_t *pud;
34303+ pmd_t *pmd_table;
34304+
34305+ pud = pud_offset(pgd, 0);
34306+ pmd_table = pmd_offset(pud, 0);
34307+
34308+ return pmd_table;
34309+}
34310+
34311 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
34312 {
34313 int pgd_idx = pgd_index(vaddr);
34314@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34315 int pgd_idx, pmd_idx;
34316 unsigned long vaddr;
34317 pgd_t *pgd;
34318+ pud_t *pud;
34319 pmd_t *pmd;
34320 pte_t *pte = NULL;
34321 unsigned long count = page_table_range_init_count(start, end);
34322@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34323 pgd = pgd_base + pgd_idx;
34324
34325 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
34326- pmd = one_md_table_init(pgd);
34327- pmd = pmd + pmd_index(vaddr);
34328+ pud = pud_offset(pgd, vaddr);
34329+ pmd = pmd_offset(pud, vaddr);
34330+
34331+#ifdef CONFIG_X86_PAE
34332+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
34333+#endif
34334+
34335 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
34336 pmd++, pmd_idx++) {
34337 pte = page_table_kmap_check(one_page_table_init(pmd),
34338@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34339 }
34340 }
34341
34342-static inline int is_kernel_text(unsigned long addr)
34343+static inline int is_kernel_text(unsigned long start, unsigned long end)
34344 {
34345- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
34346- return 1;
34347- return 0;
34348+ if ((start >= ktla_ktva((unsigned long)_etext) ||
34349+ end <= ktla_ktva((unsigned long)_stext)) &&
34350+ (start >= ktla_ktva((unsigned long)_einittext) ||
34351+ end <= ktla_ktva((unsigned long)_sinittext)) &&
34352+
34353+#ifdef CONFIG_ACPI_SLEEP
34354+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
34355+#endif
34356+
34357+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
34358+ return 0;
34359+ return 1;
34360 }
34361
34362 /*
34363@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
34364 unsigned long last_map_addr = end;
34365 unsigned long start_pfn, end_pfn;
34366 pgd_t *pgd_base = swapper_pg_dir;
34367- int pgd_idx, pmd_idx, pte_ofs;
34368+ unsigned int pgd_idx, pmd_idx, pte_ofs;
34369 unsigned long pfn;
34370 pgd_t *pgd;
34371+ pud_t *pud;
34372 pmd_t *pmd;
34373 pte_t *pte;
34374 unsigned pages_2m, pages_4k;
34375@@ -291,8 +295,13 @@ repeat:
34376 pfn = start_pfn;
34377 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
34378 pgd = pgd_base + pgd_idx;
34379- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
34380- pmd = one_md_table_init(pgd);
34381+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
34382+ pud = pud_offset(pgd, 0);
34383+ pmd = pmd_offset(pud, 0);
34384+
34385+#ifdef CONFIG_X86_PAE
34386+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
34387+#endif
34388
34389 if (pfn >= end_pfn)
34390 continue;
34391@@ -304,14 +313,13 @@ repeat:
34392 #endif
34393 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
34394 pmd++, pmd_idx++) {
34395- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
34396+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
34397
34398 /*
34399 * Map with big pages if possible, otherwise
34400 * create normal page tables:
34401 */
34402 if (use_pse) {
34403- unsigned int addr2;
34404 pgprot_t prot = PAGE_KERNEL_LARGE;
34405 /*
34406 * first pass will use the same initial
34407@@ -322,11 +330,7 @@ repeat:
34408 _PAGE_PSE);
34409
34410 pfn &= PMD_MASK >> PAGE_SHIFT;
34411- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
34412- PAGE_OFFSET + PAGE_SIZE-1;
34413-
34414- if (is_kernel_text(addr) ||
34415- is_kernel_text(addr2))
34416+ if (is_kernel_text(address, address + PMD_SIZE))
34417 prot = PAGE_KERNEL_LARGE_EXEC;
34418
34419 pages_2m++;
34420@@ -343,7 +347,7 @@ repeat:
34421 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
34422 pte += pte_ofs;
34423 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
34424- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
34425+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
34426 pgprot_t prot = PAGE_KERNEL;
34427 /*
34428 * first pass will use the same initial
34429@@ -351,7 +355,7 @@ repeat:
34430 */
34431 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
34432
34433- if (is_kernel_text(addr))
34434+ if (is_kernel_text(address, address + PAGE_SIZE))
34435 prot = PAGE_KERNEL_EXEC;
34436
34437 pages_4k++;
34438@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
34439
34440 pud = pud_offset(pgd, va);
34441 pmd = pmd_offset(pud, va);
34442- if (!pmd_present(*pmd))
34443+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
34444 break;
34445
34446 /* should not be large page here */
34447@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
34448
34449 static void __init pagetable_init(void)
34450 {
34451- pgd_t *pgd_base = swapper_pg_dir;
34452-
34453- permanent_kmaps_init(pgd_base);
34454+ permanent_kmaps_init(swapper_pg_dir);
34455 }
34456
34457-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
34458+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
34459 EXPORT_SYMBOL_GPL(__supported_pte_mask);
34460
34461 /* user-defined highmem size */
34462@@ -787,10 +789,10 @@ void __init mem_init(void)
34463 ((unsigned long)&__init_end -
34464 (unsigned long)&__init_begin) >> 10,
34465
34466- (unsigned long)&_etext, (unsigned long)&_edata,
34467- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
34468+ (unsigned long)&_sdata, (unsigned long)&_edata,
34469+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
34470
34471- (unsigned long)&_text, (unsigned long)&_etext,
34472+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
34473 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
34474
34475 /*
34476@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
34477 if (!kernel_set_to_readonly)
34478 return;
34479
34480+ start = ktla_ktva(start);
34481 pr_debug("Set kernel text: %lx - %lx for read write\n",
34482 start, start+size);
34483
34484@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
34485 if (!kernel_set_to_readonly)
34486 return;
34487
34488+ start = ktla_ktva(start);
34489 pr_debug("Set kernel text: %lx - %lx for read only\n",
34490 start, start+size);
34491
34492@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
34493 unsigned long start = PFN_ALIGN(_text);
34494 unsigned long size = PFN_ALIGN(_etext) - start;
34495
34496+ start = ktla_ktva(start);
34497 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
34498 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
34499 size >> 10);
34500diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
34501index 5621c47..5e17b7390 100644
34502--- a/arch/x86/mm/init_64.c
34503+++ b/arch/x86/mm/init_64.c
34504@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
34505 * around without checking the pgd every time.
34506 */
34507
34508-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
34509+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
34510 EXPORT_SYMBOL_GPL(__supported_pte_mask);
34511
34512 int force_personality32;
34513@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34514
34515 for (address = start; address <= end; address += PGDIR_SIZE) {
34516 const pgd_t *pgd_ref = pgd_offset_k(address);
34517+
34518+#ifdef CONFIG_PAX_PER_CPU_PGD
34519+ unsigned long cpu;
34520+#else
34521 struct page *page;
34522+#endif
34523
34524 if (pgd_none(*pgd_ref))
34525 continue;
34526
34527 spin_lock(&pgd_lock);
34528+
34529+#ifdef CONFIG_PAX_PER_CPU_PGD
34530+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34531+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
34532+
34533+ if (pgd_none(*pgd))
34534+ set_pgd(pgd, *pgd_ref);
34535+ else
34536+ BUG_ON(pgd_page_vaddr(*pgd)
34537+ != pgd_page_vaddr(*pgd_ref));
34538+ pgd = pgd_offset_cpu(cpu, kernel, address);
34539+#else
34540 list_for_each_entry(page, &pgd_list, lru) {
34541 pgd_t *pgd;
34542 spinlock_t *pgt_lock;
34543@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34544 /* the pgt_lock only for Xen */
34545 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
34546 spin_lock(pgt_lock);
34547+#endif
34548
34549 if (pgd_none(*pgd))
34550 set_pgd(pgd, *pgd_ref);
34551@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34552 BUG_ON(pgd_page_vaddr(*pgd)
34553 != pgd_page_vaddr(*pgd_ref));
34554
34555+#ifndef CONFIG_PAX_PER_CPU_PGD
34556 spin_unlock(pgt_lock);
34557+#endif
34558+
34559 }
34560 spin_unlock(&pgd_lock);
34561 }
34562@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
34563 {
34564 if (pgd_none(*pgd)) {
34565 pud_t *pud = (pud_t *)spp_getpage();
34566- pgd_populate(&init_mm, pgd, pud);
34567+ pgd_populate_kernel(&init_mm, pgd, pud);
34568 if (pud != pud_offset(pgd, 0))
34569 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
34570 pud, pud_offset(pgd, 0));
34571@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
34572 {
34573 if (pud_none(*pud)) {
34574 pmd_t *pmd = (pmd_t *) spp_getpage();
34575- pud_populate(&init_mm, pud, pmd);
34576+ pud_populate_kernel(&init_mm, pud, pmd);
34577 if (pmd != pmd_offset(pud, 0))
34578 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
34579 pmd, pmd_offset(pud, 0));
34580@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
34581 pmd = fill_pmd(pud, vaddr);
34582 pte = fill_pte(pmd, vaddr);
34583
34584+ pax_open_kernel();
34585 set_pte(pte, new_pte);
34586+ pax_close_kernel();
34587
34588 /*
34589 * It's enough to flush this one mapping.
34590@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
34591 pgd = pgd_offset_k((unsigned long)__va(phys));
34592 if (pgd_none(*pgd)) {
34593 pud = (pud_t *) spp_getpage();
34594- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
34595- _PAGE_USER));
34596+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
34597 }
34598 pud = pud_offset(pgd, (unsigned long)__va(phys));
34599 if (pud_none(*pud)) {
34600 pmd = (pmd_t *) spp_getpage();
34601- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
34602- _PAGE_USER));
34603+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
34604 }
34605 pmd = pmd_offset(pud, phys);
34606 BUG_ON(!pmd_none(*pmd));
34607@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
34608 prot);
34609
34610 spin_lock(&init_mm.page_table_lock);
34611- pud_populate(&init_mm, pud, pmd);
34612+ pud_populate_kernel(&init_mm, pud, pmd);
34613 spin_unlock(&init_mm.page_table_lock);
34614 }
34615 __flush_tlb_all();
34616@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
34617 page_size_mask);
34618
34619 spin_lock(&init_mm.page_table_lock);
34620- pgd_populate(&init_mm, pgd, pud);
34621+ pgd_populate_kernel(&init_mm, pgd, pud);
34622 spin_unlock(&init_mm.page_table_lock);
34623 pgd_changed = true;
34624 }
34625@@ -1196,8 +1217,8 @@ static struct vm_operations_struct gate_vma_ops = {
34626 static struct vm_area_struct gate_vma = {
34627 .vm_start = VSYSCALL_ADDR,
34628 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
34629- .vm_page_prot = PAGE_READONLY_EXEC,
34630- .vm_flags = VM_READ | VM_EXEC,
34631+ .vm_page_prot = PAGE_READONLY,
34632+ .vm_flags = VM_READ,
34633 .vm_ops = &gate_vma_ops,
34634 };
34635
34636diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
34637index 7b179b49..6bd17777 100644
34638--- a/arch/x86/mm/iomap_32.c
34639+++ b/arch/x86/mm/iomap_32.c
34640@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
34641 type = kmap_atomic_idx_push();
34642 idx = type + KM_TYPE_NR * smp_processor_id();
34643 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
34644+
34645+ pax_open_kernel();
34646 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
34647+ pax_close_kernel();
34648+
34649 arch_flush_lazy_mmu_mode();
34650
34651 return (void *)vaddr;
34652diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
34653index baff1da..2816ef4 100644
34654--- a/arch/x86/mm/ioremap.c
34655+++ b/arch/x86/mm/ioremap.c
34656@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
34657 unsigned long i;
34658
34659 for (i = 0; i < nr_pages; ++i)
34660- if (pfn_valid(start_pfn + i) &&
34661- !PageReserved(pfn_to_page(start_pfn + i)))
34662+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
34663+ !PageReserved(pfn_to_page(start_pfn + i))))
34664 return 1;
34665
34666 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
34667@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
34668 *
34669 * Caller must ensure there is only one unmapping for the same pointer.
34670 */
34671-void iounmap(volatile void __iomem *addr)
34672+void iounmap(const volatile void __iomem *addr)
34673 {
34674 struct vm_struct *p, *o;
34675
34676@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
34677
34678 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
34679 if (page_is_ram(start >> PAGE_SHIFT))
34680+#ifdef CONFIG_HIGHMEM
34681+ if ((start >> PAGE_SHIFT) < max_low_pfn)
34682+#endif
34683 return __va(phys);
34684
34685 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
34686@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
34687 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
34688 {
34689 if (page_is_ram(phys >> PAGE_SHIFT))
34690+#ifdef CONFIG_HIGHMEM
34691+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
34692+#endif
34693 return;
34694
34695 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
34696 return;
34697 }
34698
34699-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
34700+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
34701
34702 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
34703 {
34704@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
34705 early_ioremap_setup();
34706
34707 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
34708- memset(bm_pte, 0, sizeof(bm_pte));
34709- pmd_populate_kernel(&init_mm, pmd, bm_pte);
34710+ pmd_populate_user(&init_mm, pmd, bm_pte);
34711
34712 /*
34713 * The boot-ioremap range spans multiple pmds, for which
34714diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
34715index dd89a13..d77bdcc 100644
34716--- a/arch/x86/mm/kmemcheck/kmemcheck.c
34717+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
34718@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
34719 * memory (e.g. tracked pages)? For now, we need this to avoid
34720 * invoking kmemcheck for PnP BIOS calls.
34721 */
34722- if (regs->flags & X86_VM_MASK)
34723+ if (v8086_mode(regs))
34724 return false;
34725- if (regs->cs != __KERNEL_CS)
34726+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
34727 return false;
34728
34729 pte = kmemcheck_pte_lookup(address);
34730diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
34731index 919b912..9267313 100644
34732--- a/arch/x86/mm/mmap.c
34733+++ b/arch/x86/mm/mmap.c
34734@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
34735 * Leave an at least ~128 MB hole with possible stack randomization.
34736 */
34737 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
34738-#define MAX_GAP (TASK_SIZE/6*5)
34739+#define MAX_GAP (pax_task_size/6*5)
34740
34741 static int mmap_is_legacy(void)
34742 {
34743@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
34744 return rnd << PAGE_SHIFT;
34745 }
34746
34747-static unsigned long mmap_base(void)
34748+static unsigned long mmap_base(struct mm_struct *mm)
34749 {
34750 unsigned long gap = rlimit(RLIMIT_STACK);
34751+ unsigned long pax_task_size = TASK_SIZE;
34752+
34753+#ifdef CONFIG_PAX_SEGMEXEC
34754+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34755+ pax_task_size = SEGMEXEC_TASK_SIZE;
34756+#endif
34757
34758 if (gap < MIN_GAP)
34759 gap = MIN_GAP;
34760 else if (gap > MAX_GAP)
34761 gap = MAX_GAP;
34762
34763- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
34764+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
34765 }
34766
34767 /*
34768 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
34769 * does, but not when emulating X86_32
34770 */
34771-static unsigned long mmap_legacy_base(void)
34772+static unsigned long mmap_legacy_base(struct mm_struct *mm)
34773 {
34774- if (mmap_is_ia32())
34775+ if (mmap_is_ia32()) {
34776+
34777+#ifdef CONFIG_PAX_SEGMEXEC
34778+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34779+ return SEGMEXEC_TASK_UNMAPPED_BASE;
34780+ else
34781+#endif
34782+
34783 return TASK_UNMAPPED_BASE;
34784- else
34785+ } else
34786 return TASK_UNMAPPED_BASE + mmap_rnd();
34787 }
34788
34789@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
34790 */
34791 void arch_pick_mmap_layout(struct mm_struct *mm)
34792 {
34793- mm->mmap_legacy_base = mmap_legacy_base();
34794- mm->mmap_base = mmap_base();
34795+ mm->mmap_legacy_base = mmap_legacy_base(mm);
34796+ mm->mmap_base = mmap_base(mm);
34797+
34798+#ifdef CONFIG_PAX_RANDMMAP
34799+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
34800+ mm->mmap_legacy_base += mm->delta_mmap;
34801+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
34802+ }
34803+#endif
34804
34805 if (mmap_is_legacy()) {
34806 mm->mmap_base = mm->mmap_legacy_base;
34807diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
34808index 0057a7a..95c7edd 100644
34809--- a/arch/x86/mm/mmio-mod.c
34810+++ b/arch/x86/mm/mmio-mod.c
34811@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
34812 break;
34813 default:
34814 {
34815- unsigned char *ip = (unsigned char *)instptr;
34816+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
34817 my_trace->opcode = MMIO_UNKNOWN_OP;
34818 my_trace->width = 0;
34819 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
34820@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
34821 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34822 void __iomem *addr)
34823 {
34824- static atomic_t next_id;
34825+ static atomic_unchecked_t next_id;
34826 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
34827 /* These are page-unaligned. */
34828 struct mmiotrace_map map = {
34829@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34830 .private = trace
34831 },
34832 .phys = offset,
34833- .id = atomic_inc_return(&next_id)
34834+ .id = atomic_inc_return_unchecked(&next_id)
34835 };
34836 map.map_id = trace->id;
34837
34838@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
34839 ioremap_trace_core(offset, size, addr);
34840 }
34841
34842-static void iounmap_trace_core(volatile void __iomem *addr)
34843+static void iounmap_trace_core(const volatile void __iomem *addr)
34844 {
34845 struct mmiotrace_map map = {
34846 .phys = 0,
34847@@ -328,7 +328,7 @@ not_enabled:
34848 }
34849 }
34850
34851-void mmiotrace_iounmap(volatile void __iomem *addr)
34852+void mmiotrace_iounmap(const volatile void __iomem *addr)
34853 {
34854 might_sleep();
34855 if (is_enabled()) /* recheck and proper locking in *_core() */
34856diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
34857index a32b706..efb308b 100644
34858--- a/arch/x86/mm/numa.c
34859+++ b/arch/x86/mm/numa.c
34860@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
34861 return true;
34862 }
34863
34864-static int __init numa_register_memblks(struct numa_meminfo *mi)
34865+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
34866 {
34867 unsigned long uninitialized_var(pfn_align);
34868 int i, nid;
34869diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
34870index ae242a7..1c7998f 100644
34871--- a/arch/x86/mm/pageattr.c
34872+++ b/arch/x86/mm/pageattr.c
34873@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34874 */
34875 #ifdef CONFIG_PCI_BIOS
34876 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
34877- pgprot_val(forbidden) |= _PAGE_NX;
34878+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34879 #endif
34880
34881 /*
34882@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34883 * Does not cover __inittext since that is gone later on. On
34884 * 64bit we do not enforce !NX on the low mapping
34885 */
34886- if (within(address, (unsigned long)_text, (unsigned long)_etext))
34887- pgprot_val(forbidden) |= _PAGE_NX;
34888+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
34889+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34890
34891+#ifdef CONFIG_DEBUG_RODATA
34892 /*
34893 * The .rodata section needs to be read-only. Using the pfn
34894 * catches all aliases.
34895@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34896 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
34897 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
34898 pgprot_val(forbidden) |= _PAGE_RW;
34899+#endif
34900
34901 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
34902 /*
34903@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34904 }
34905 #endif
34906
34907+#ifdef CONFIG_PAX_KERNEXEC
34908+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
34909+ pgprot_val(forbidden) |= _PAGE_RW;
34910+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34911+ }
34912+#endif
34913+
34914 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
34915
34916 return prot;
34917@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
34918 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
34919 {
34920 /* change init_mm */
34921+ pax_open_kernel();
34922 set_pte_atomic(kpte, pte);
34923+
34924 #ifdef CONFIG_X86_32
34925 if (!SHARED_KERNEL_PMD) {
34926+
34927+#ifdef CONFIG_PAX_PER_CPU_PGD
34928+ unsigned long cpu;
34929+#else
34930 struct page *page;
34931+#endif
34932
34933+#ifdef CONFIG_PAX_PER_CPU_PGD
34934+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34935+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
34936+#else
34937 list_for_each_entry(page, &pgd_list, lru) {
34938- pgd_t *pgd;
34939+ pgd_t *pgd = (pgd_t *)page_address(page);
34940+#endif
34941+
34942 pud_t *pud;
34943 pmd_t *pmd;
34944
34945- pgd = (pgd_t *)page_address(page) + pgd_index(address);
34946+ pgd += pgd_index(address);
34947 pud = pud_offset(pgd, address);
34948 pmd = pmd_offset(pud, address);
34949 set_pte_atomic((pte_t *)pmd, pte);
34950 }
34951 }
34952 #endif
34953+ pax_close_kernel();
34954 }
34955
34956 static int
34957diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
34958index 6574388..87e9bef 100644
34959--- a/arch/x86/mm/pat.c
34960+++ b/arch/x86/mm/pat.c
34961@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
34962
34963 if (!entry) {
34964 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
34965- current->comm, current->pid, start, end - 1);
34966+ current->comm, task_pid_nr(current), start, end - 1);
34967 return -EINVAL;
34968 }
34969
34970@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34971
34972 while (cursor < to) {
34973 if (!devmem_is_allowed(pfn)) {
34974- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
34975- current->comm, from, to - 1);
34976+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
34977+ current->comm, from, to - 1, cursor);
34978 return 0;
34979 }
34980 cursor += PAGE_SIZE;
34981@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
34982 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
34983 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
34984 "for [mem %#010Lx-%#010Lx]\n",
34985- current->comm, current->pid,
34986+ current->comm, task_pid_nr(current),
34987 cattr_name(flags),
34988 base, (unsigned long long)(base + size-1));
34989 return -EINVAL;
34990@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34991 flags = lookup_memtype(paddr);
34992 if (want_flags != flags) {
34993 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
34994- current->comm, current->pid,
34995+ current->comm, task_pid_nr(current),
34996 cattr_name(want_flags),
34997 (unsigned long long)paddr,
34998 (unsigned long long)(paddr + size - 1),
34999@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
35000 free_memtype(paddr, paddr + size);
35001 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
35002 " for [mem %#010Lx-%#010Lx], got %s\n",
35003- current->comm, current->pid,
35004+ current->comm, task_pid_nr(current),
35005 cattr_name(want_flags),
35006 (unsigned long long)paddr,
35007 (unsigned long long)(paddr + size - 1),
35008diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
35009index 415f6c4..d319983 100644
35010--- a/arch/x86/mm/pat_rbtree.c
35011+++ b/arch/x86/mm/pat_rbtree.c
35012@@ -160,7 +160,7 @@ success:
35013
35014 failure:
35015 printk(KERN_INFO "%s:%d conflicting memory types "
35016- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
35017+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
35018 end, cattr_name(found_type), cattr_name(match->type));
35019 return -EBUSY;
35020 }
35021diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
35022index 9f0614d..92ae64a 100644
35023--- a/arch/x86/mm/pf_in.c
35024+++ b/arch/x86/mm/pf_in.c
35025@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
35026 int i;
35027 enum reason_type rv = OTHERS;
35028
35029- p = (unsigned char *)ins_addr;
35030+ p = (unsigned char *)ktla_ktva(ins_addr);
35031 p += skip_prefix(p, &prf);
35032 p += get_opcode(p, &opcode);
35033
35034@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
35035 struct prefix_bits prf;
35036 int i;
35037
35038- p = (unsigned char *)ins_addr;
35039+ p = (unsigned char *)ktla_ktva(ins_addr);
35040 p += skip_prefix(p, &prf);
35041 p += get_opcode(p, &opcode);
35042
35043@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
35044 struct prefix_bits prf;
35045 int i;
35046
35047- p = (unsigned char *)ins_addr;
35048+ p = (unsigned char *)ktla_ktva(ins_addr);
35049 p += skip_prefix(p, &prf);
35050 p += get_opcode(p, &opcode);
35051
35052@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
35053 struct prefix_bits prf;
35054 int i;
35055
35056- p = (unsigned char *)ins_addr;
35057+ p = (unsigned char *)ktla_ktva(ins_addr);
35058 p += skip_prefix(p, &prf);
35059 p += get_opcode(p, &opcode);
35060 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
35061@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
35062 struct prefix_bits prf;
35063 int i;
35064
35065- p = (unsigned char *)ins_addr;
35066+ p = (unsigned char *)ktla_ktva(ins_addr);
35067 p += skip_prefix(p, &prf);
35068 p += get_opcode(p, &opcode);
35069 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
35070diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
35071index 6fb6927..4fc13c0 100644
35072--- a/arch/x86/mm/pgtable.c
35073+++ b/arch/x86/mm/pgtable.c
35074@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
35075 list_del(&page->lru);
35076 }
35077
35078-#define UNSHARED_PTRS_PER_PGD \
35079- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
35080+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
35081+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
35082
35083+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
35084+{
35085+ unsigned int count = USER_PGD_PTRS;
35086
35087+ if (!pax_user_shadow_base)
35088+ return;
35089+
35090+ while (count--)
35091+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
35092+}
35093+#endif
35094+
35095+#ifdef CONFIG_PAX_PER_CPU_PGD
35096+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
35097+{
35098+ unsigned int count = USER_PGD_PTRS;
35099+
35100+ while (count--) {
35101+ pgd_t pgd;
35102+
35103+#ifdef CONFIG_X86_64
35104+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
35105+#else
35106+ pgd = *src++;
35107+#endif
35108+
35109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
35110+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
35111+#endif
35112+
35113+ *dst++ = pgd;
35114+ }
35115+
35116+}
35117+#endif
35118+
35119+#ifdef CONFIG_X86_64
35120+#define pxd_t pud_t
35121+#define pyd_t pgd_t
35122+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
35123+#define pgtable_pxd_page_ctor(page) true
35124+#define pgtable_pxd_page_dtor(page)
35125+#define pxd_free(mm, pud) pud_free((mm), (pud))
35126+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
35127+#define pyd_offset(mm, address) pgd_offset((mm), (address))
35128+#define PYD_SIZE PGDIR_SIZE
35129+#else
35130+#define pxd_t pmd_t
35131+#define pyd_t pud_t
35132+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
35133+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
35134+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
35135+#define pxd_free(mm, pud) pmd_free((mm), (pud))
35136+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
35137+#define pyd_offset(mm, address) pud_offset((mm), (address))
35138+#define PYD_SIZE PUD_SIZE
35139+#endif
35140+
35141+#ifdef CONFIG_PAX_PER_CPU_PGD
35142+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
35143+static inline void pgd_dtor(pgd_t *pgd) {}
35144+#else
35145 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
35146 {
35147 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
35148@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
35149 pgd_list_del(pgd);
35150 spin_unlock(&pgd_lock);
35151 }
35152+#endif
35153
35154 /*
35155 * List of all pgd's needed for non-PAE so it can invalidate entries
35156@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
35157 * -- nyc
35158 */
35159
35160-#ifdef CONFIG_X86_PAE
35161+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
35162 /*
35163 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
35164 * updating the top-level pagetable entries to guarantee the
35165@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
35166 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
35167 * and initialize the kernel pmds here.
35168 */
35169-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
35170+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
35171
35172 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
35173 {
35174@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
35175 */
35176 flush_tlb_mm(mm);
35177 }
35178+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
35179+#define PREALLOCATED_PXDS USER_PGD_PTRS
35180 #else /* !CONFIG_X86_PAE */
35181
35182 /* No need to prepopulate any pagetable entries in non-PAE modes. */
35183-#define PREALLOCATED_PMDS 0
35184+#define PREALLOCATED_PXDS 0
35185
35186 #endif /* CONFIG_X86_PAE */
35187
35188-static void free_pmds(pmd_t *pmds[])
35189+static void free_pxds(pxd_t *pxds[])
35190 {
35191 int i;
35192
35193- for(i = 0; i < PREALLOCATED_PMDS; i++)
35194- if (pmds[i]) {
35195- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
35196- free_page((unsigned long)pmds[i]);
35197+ for(i = 0; i < PREALLOCATED_PXDS; i++)
35198+ if (pxds[i]) {
35199+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
35200+ free_page((unsigned long)pxds[i]);
35201 }
35202 }
35203
35204-static int preallocate_pmds(pmd_t *pmds[])
35205+static int preallocate_pxds(pxd_t *pxds[])
35206 {
35207 int i;
35208 bool failed = false;
35209
35210- for(i = 0; i < PREALLOCATED_PMDS; i++) {
35211- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
35212- if (!pmd)
35213+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
35214+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
35215+ if (!pxd)
35216 failed = true;
35217- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
35218- free_page((unsigned long)pmd);
35219- pmd = NULL;
35220+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
35221+ free_page((unsigned long)pxd);
35222+ pxd = NULL;
35223 failed = true;
35224 }
35225- pmds[i] = pmd;
35226+ pxds[i] = pxd;
35227 }
35228
35229 if (failed) {
35230- free_pmds(pmds);
35231+ free_pxds(pxds);
35232 return -ENOMEM;
35233 }
35234
35235@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
35236 * preallocate which never got a corresponding vma will need to be
35237 * freed manually.
35238 */
35239-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
35240+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
35241 {
35242 int i;
35243
35244- for(i = 0; i < PREALLOCATED_PMDS; i++) {
35245+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
35246 pgd_t pgd = pgdp[i];
35247
35248 if (pgd_val(pgd) != 0) {
35249- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
35250+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
35251
35252- pgdp[i] = native_make_pgd(0);
35253+ set_pgd(pgdp + i, native_make_pgd(0));
35254
35255- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
35256- pmd_free(mm, pmd);
35257+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
35258+ pxd_free(mm, pxd);
35259 }
35260 }
35261 }
35262
35263-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
35264+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
35265 {
35266- pud_t *pud;
35267+ pyd_t *pyd;
35268 int i;
35269
35270- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
35271+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
35272 return;
35273
35274- pud = pud_offset(pgd, 0);
35275-
35276- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
35277- pmd_t *pmd = pmds[i];
35278+#ifdef CONFIG_X86_64
35279+ pyd = pyd_offset(mm, 0L);
35280+#else
35281+ pyd = pyd_offset(pgd, 0L);
35282+#endif
35283
35284+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
35285+ pxd_t *pxd = pxds[i];
35286 if (i >= KERNEL_PGD_BOUNDARY)
35287- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
35288- sizeof(pmd_t) * PTRS_PER_PMD);
35289+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
35290+ sizeof(pxd_t) * PTRS_PER_PMD);
35291
35292- pud_populate(mm, pud, pmd);
35293+ pyd_populate(mm, pyd, pxd);
35294 }
35295 }
35296
35297 pgd_t *pgd_alloc(struct mm_struct *mm)
35298 {
35299 pgd_t *pgd;
35300- pmd_t *pmds[PREALLOCATED_PMDS];
35301+ pxd_t *pxds[PREALLOCATED_PXDS];
35302
35303 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
35304
35305@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
35306
35307 mm->pgd = pgd;
35308
35309- if (preallocate_pmds(pmds) != 0)
35310+ if (preallocate_pxds(pxds) != 0)
35311 goto out_free_pgd;
35312
35313 if (paravirt_pgd_alloc(mm) != 0)
35314- goto out_free_pmds;
35315+ goto out_free_pxds;
35316
35317 /*
35318 * Make sure that pre-populating the pmds is atomic with
35319@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
35320 spin_lock(&pgd_lock);
35321
35322 pgd_ctor(mm, pgd);
35323- pgd_prepopulate_pmd(mm, pgd, pmds);
35324+ pgd_prepopulate_pxd(mm, pgd, pxds);
35325
35326 spin_unlock(&pgd_lock);
35327
35328 return pgd;
35329
35330-out_free_pmds:
35331- free_pmds(pmds);
35332+out_free_pxds:
35333+ free_pxds(pxds);
35334 out_free_pgd:
35335 free_page((unsigned long)pgd);
35336 out:
35337@@ -313,7 +380,7 @@ out:
35338
35339 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
35340 {
35341- pgd_mop_up_pmds(mm, pgd);
35342+ pgd_mop_up_pxds(mm, pgd);
35343 pgd_dtor(pgd);
35344 paravirt_pgd_free(mm, pgd);
35345 free_page((unsigned long)pgd);
35346diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
35347index 4dd8cf6..f9d143e 100644
35348--- a/arch/x86/mm/pgtable_32.c
35349+++ b/arch/x86/mm/pgtable_32.c
35350@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
35351 return;
35352 }
35353 pte = pte_offset_kernel(pmd, vaddr);
35354+
35355+ pax_open_kernel();
35356 if (pte_val(pteval))
35357 set_pte_at(&init_mm, vaddr, pte, pteval);
35358 else
35359 pte_clear(&init_mm, vaddr, pte);
35360+ pax_close_kernel();
35361
35362 /*
35363 * It's enough to flush this one mapping.
35364diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
35365index e666cbb..61788c45 100644
35366--- a/arch/x86/mm/physaddr.c
35367+++ b/arch/x86/mm/physaddr.c
35368@@ -10,7 +10,7 @@
35369 #ifdef CONFIG_X86_64
35370
35371 #ifdef CONFIG_DEBUG_VIRTUAL
35372-unsigned long __phys_addr(unsigned long x)
35373+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
35374 {
35375 unsigned long y = x - __START_KERNEL_map;
35376
35377@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
35378 #else
35379
35380 #ifdef CONFIG_DEBUG_VIRTUAL
35381-unsigned long __phys_addr(unsigned long x)
35382+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
35383 {
35384 unsigned long phys_addr = x - PAGE_OFFSET;
35385 /* VMALLOC_* aren't constants */
35386diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
35387index 90555bf..f5f1828 100644
35388--- a/arch/x86/mm/setup_nx.c
35389+++ b/arch/x86/mm/setup_nx.c
35390@@ -5,8 +5,10 @@
35391 #include <asm/pgtable.h>
35392 #include <asm/proto.h>
35393
35394+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35395 static int disable_nx;
35396
35397+#ifndef CONFIG_PAX_PAGEEXEC
35398 /*
35399 * noexec = on|off
35400 *
35401@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
35402 return 0;
35403 }
35404 early_param("noexec", noexec_setup);
35405+#endif
35406+
35407+#endif
35408
35409 void x86_configure_nx(void)
35410 {
35411+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35412 if (cpu_has_nx && !disable_nx)
35413 __supported_pte_mask |= _PAGE_NX;
35414 else
35415+#endif
35416 __supported_pte_mask &= ~_PAGE_NX;
35417 }
35418
35419diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
35420index ee61c36..e6fedeb 100644
35421--- a/arch/x86/mm/tlb.c
35422+++ b/arch/x86/mm/tlb.c
35423@@ -48,7 +48,11 @@ void leave_mm(int cpu)
35424 BUG();
35425 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
35426 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
35427+
35428+#ifndef CONFIG_PAX_PER_CPU_PGD
35429 load_cr3(swapper_pg_dir);
35430+#endif
35431+
35432 /*
35433 * This gets called in the idle path where RCU
35434 * functions differently. Tracing normally
35435diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
35436new file mode 100644
35437index 0000000..dace51c
35438--- /dev/null
35439+++ b/arch/x86/mm/uderef_64.c
35440@@ -0,0 +1,37 @@
35441+#include <linux/mm.h>
35442+#include <asm/pgtable.h>
35443+#include <asm/uaccess.h>
35444+
35445+#ifdef CONFIG_PAX_MEMORY_UDEREF
35446+/* PaX: due to the special call convention these functions must
35447+ * - remain leaf functions under all configurations,
35448+ * - never be called directly, only dereferenced from the wrappers.
35449+ */
35450+void __pax_open_userland(void)
35451+{
35452+ unsigned int cpu;
35453+
35454+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
35455+ return;
35456+
35457+ cpu = raw_get_cpu();
35458+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
35459+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
35460+ raw_put_cpu_no_resched();
35461+}
35462+EXPORT_SYMBOL(__pax_open_userland);
35463+
35464+void __pax_close_userland(void)
35465+{
35466+ unsigned int cpu;
35467+
35468+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
35469+ return;
35470+
35471+ cpu = raw_get_cpu();
35472+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
35473+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
35474+ raw_put_cpu_no_resched();
35475+}
35476+EXPORT_SYMBOL(__pax_close_userland);
35477+#endif
35478diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
35479index 6440221..f84b5c7 100644
35480--- a/arch/x86/net/bpf_jit.S
35481+++ b/arch/x86/net/bpf_jit.S
35482@@ -9,6 +9,7 @@
35483 */
35484 #include <linux/linkage.h>
35485 #include <asm/dwarf2.h>
35486+#include <asm/alternative-asm.h>
35487
35488 /*
35489 * Calling convention :
35490@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
35491 jle bpf_slow_path_word
35492 mov (SKBDATA,%rsi),%eax
35493 bswap %eax /* ntohl() */
35494+ pax_force_retaddr
35495 ret
35496
35497 sk_load_half:
35498@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
35499 jle bpf_slow_path_half
35500 movzwl (SKBDATA,%rsi),%eax
35501 rol $8,%ax # ntohs()
35502+ pax_force_retaddr
35503 ret
35504
35505 sk_load_byte:
35506@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
35507 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
35508 jle bpf_slow_path_byte
35509 movzbl (SKBDATA,%rsi),%eax
35510+ pax_force_retaddr
35511 ret
35512
35513 /* rsi contains offset and can be scratched */
35514@@ -90,6 +94,7 @@ bpf_slow_path_word:
35515 js bpf_error
35516 mov - MAX_BPF_STACK + 32(%rbp),%eax
35517 bswap %eax
35518+ pax_force_retaddr
35519 ret
35520
35521 bpf_slow_path_half:
35522@@ -98,12 +103,14 @@ bpf_slow_path_half:
35523 mov - MAX_BPF_STACK + 32(%rbp),%ax
35524 rol $8,%ax
35525 movzwl %ax,%eax
35526+ pax_force_retaddr
35527 ret
35528
35529 bpf_slow_path_byte:
35530 bpf_slow_path_common(1)
35531 js bpf_error
35532 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
35533+ pax_force_retaddr
35534 ret
35535
35536 #define sk_negative_common(SIZE) \
35537@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
35538 sk_negative_common(4)
35539 mov (%rax), %eax
35540 bswap %eax
35541+ pax_force_retaddr
35542 ret
35543
35544 bpf_slow_path_half_neg:
35545@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
35546 mov (%rax),%ax
35547 rol $8,%ax
35548 movzwl %ax,%eax
35549+ pax_force_retaddr
35550 ret
35551
35552 bpf_slow_path_byte_neg:
35553@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
35554 .globl sk_load_byte_negative_offset
35555 sk_negative_common(1)
35556 movzbl (%rax), %eax
35557+ pax_force_retaddr
35558 ret
35559
35560 bpf_error:
35561@@ -156,4 +166,5 @@ bpf_error:
35562 mov - MAX_BPF_STACK + 16(%rbp),%r14
35563 mov - MAX_BPF_STACK + 24(%rbp),%r15
35564 leaveq
35565+ pax_force_retaddr
35566 ret
35567diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
35568index 5c8cb80..5fd7860 100644
35569--- a/arch/x86/net/bpf_jit_comp.c
35570+++ b/arch/x86/net/bpf_jit_comp.c
35571@@ -15,7 +15,11 @@
35572 #include <linux/if_vlan.h>
35573 #include <linux/random.h>
35574
35575+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
35576+int bpf_jit_enable __read_only;
35577+#else
35578 int bpf_jit_enable __read_mostly;
35579+#endif
35580
35581 /*
35582 * assembly code in arch/x86/net/bpf_jit.S
35583@@ -109,36 +113,32 @@ static inline void bpf_flush_icache(void *start, void *end)
35584 #define CHOOSE_LOAD_FUNC(K, func) \
35585 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
35586
35587-struct bpf_binary_header {
35588- unsigned int pages;
35589- /* Note : for security reasons, bpf code will follow a randomly
35590- * sized amount of int3 instructions
35591- */
35592- u8 image[];
35593-};
35594-
35595-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
35596+/* Note : for security reasons, bpf code will follow a randomly
35597+ * sized amount of int3 instructions
35598+ */
35599+static u8 *bpf_alloc_binary(unsigned int proglen,
35600 u8 **image_ptr)
35601 {
35602 unsigned int sz, hole;
35603- struct bpf_binary_header *header;
35604+ u8 *header;
35605
35606 /* Most of BPF filters are really small,
35607 * but if some of them fill a page, allow at least
35608 * 128 extra bytes to insert a random section of int3
35609 */
35610- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
35611- header = module_alloc(sz);
35612+ sz = round_up(proglen + 128, PAGE_SIZE);
35613+ header = module_alloc_exec(sz);
35614 if (!header)
35615 return NULL;
35616
35617+ pax_open_kernel();
35618 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
35619+ pax_close_kernel();
35620
35621- header->pages = sz / PAGE_SIZE;
35622- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
35623+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
35624
35625 /* insert a random number of int3 instructions before BPF code */
35626- *image_ptr = &header->image[prandom_u32() % hole];
35627+ *image_ptr = &header[prandom_u32() % hole];
35628 return header;
35629 }
35630
35631@@ -853,7 +853,9 @@ common_load: ctx->seen_ld_abs = true;
35632 pr_err("bpf_jit_compile fatal error\n");
35633 return -EFAULT;
35634 }
35635+ pax_open_kernel();
35636 memcpy(image + proglen, temp, ilen);
35637+ pax_close_kernel();
35638 }
35639 proglen += ilen;
35640 addrs[i] = proglen;
35641@@ -868,7 +870,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
35642
35643 void bpf_int_jit_compile(struct bpf_prog *prog)
35644 {
35645- struct bpf_binary_header *header = NULL;
35646+ u8 *header = NULL;
35647 int proglen, oldproglen = 0;
35648 struct jit_context ctx = {};
35649 u8 *image = NULL;
35650@@ -900,7 +902,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
35651 if (proglen <= 0) {
35652 image = NULL;
35653 if (header)
35654- module_free(NULL, header);
35655+ module_free_exec(NULL, image);
35656 goto out;
35657 }
35658 if (image) {
35659@@ -922,7 +924,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
35660
35661 if (image) {
35662 bpf_flush_icache(header, image + proglen);
35663- set_memory_ro((unsigned long)header, header->pages);
35664 prog->bpf_func = (void *)image;
35665 prog->jited = 1;
35666 }
35667@@ -930,23 +931,15 @@ out:
35668 kfree(addrs);
35669 }
35670
35671-static void bpf_jit_free_deferred(struct work_struct *work)
35672-{
35673- struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
35674- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
35675- struct bpf_binary_header *header = (void *)addr;
35676-
35677- set_memory_rw(addr, header->pages);
35678- module_free(NULL, header);
35679- kfree(fp);
35680-}
35681-
35682 void bpf_jit_free(struct bpf_prog *fp)
35683 {
35684- if (fp->jited) {
35685- INIT_WORK(&fp->work, bpf_jit_free_deferred);
35686- schedule_work(&fp->work);
35687- } else {
35688- kfree(fp);
35689- }
35690+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
35691+
35692+ if (!fp->jited)
35693+ goto free_filter;
35694+
35695+ module_free_exec(NULL, (void *)addr);
35696+
35697+free_filter:
35698+ bpf_prog_unlock_free(fp);
35699 }
35700diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
35701index 5d04be5..2beeaa2 100644
35702--- a/arch/x86/oprofile/backtrace.c
35703+++ b/arch/x86/oprofile/backtrace.c
35704@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
35705 struct stack_frame_ia32 *fp;
35706 unsigned long bytes;
35707
35708- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
35709+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
35710 if (bytes != 0)
35711 return NULL;
35712
35713- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
35714+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
35715
35716 oprofile_add_trace(bufhead[0].return_address);
35717
35718@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
35719 struct stack_frame bufhead[2];
35720 unsigned long bytes;
35721
35722- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
35723+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
35724 if (bytes != 0)
35725 return NULL;
35726
35727@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
35728 {
35729 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
35730
35731- if (!user_mode_vm(regs)) {
35732+ if (!user_mode(regs)) {
35733 unsigned long stack = kernel_stack_pointer(regs);
35734 if (depth)
35735 dump_trace(NULL, regs, (unsigned long *)stack, 0,
35736diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
35737index 379e8bd..6386e09 100644
35738--- a/arch/x86/oprofile/nmi_int.c
35739+++ b/arch/x86/oprofile/nmi_int.c
35740@@ -23,6 +23,7 @@
35741 #include <asm/nmi.h>
35742 #include <asm/msr.h>
35743 #include <asm/apic.h>
35744+#include <asm/pgtable.h>
35745
35746 #include "op_counter.h"
35747 #include "op_x86_model.h"
35748@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
35749 if (ret)
35750 return ret;
35751
35752- if (!model->num_virt_counters)
35753- model->num_virt_counters = model->num_counters;
35754+ if (!model->num_virt_counters) {
35755+ pax_open_kernel();
35756+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
35757+ pax_close_kernel();
35758+ }
35759
35760 mux_init(ops);
35761
35762diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
35763index 50d86c0..7985318 100644
35764--- a/arch/x86/oprofile/op_model_amd.c
35765+++ b/arch/x86/oprofile/op_model_amd.c
35766@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
35767 num_counters = AMD64_NUM_COUNTERS;
35768 }
35769
35770- op_amd_spec.num_counters = num_counters;
35771- op_amd_spec.num_controls = num_counters;
35772- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35773+ pax_open_kernel();
35774+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
35775+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
35776+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35777+ pax_close_kernel();
35778
35779 return 0;
35780 }
35781diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
35782index d90528e..0127e2b 100644
35783--- a/arch/x86/oprofile/op_model_ppro.c
35784+++ b/arch/x86/oprofile/op_model_ppro.c
35785@@ -19,6 +19,7 @@
35786 #include <asm/msr.h>
35787 #include <asm/apic.h>
35788 #include <asm/nmi.h>
35789+#include <asm/pgtable.h>
35790
35791 #include "op_x86_model.h"
35792 #include "op_counter.h"
35793@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
35794
35795 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
35796
35797- op_arch_perfmon_spec.num_counters = num_counters;
35798- op_arch_perfmon_spec.num_controls = num_counters;
35799+ pax_open_kernel();
35800+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
35801+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
35802+ pax_close_kernel();
35803 }
35804
35805 static int arch_perfmon_init(struct oprofile_operations *ignore)
35806diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
35807index 71e8a67..6a313bb 100644
35808--- a/arch/x86/oprofile/op_x86_model.h
35809+++ b/arch/x86/oprofile/op_x86_model.h
35810@@ -52,7 +52,7 @@ struct op_x86_model_spec {
35811 void (*switch_ctrl)(struct op_x86_model_spec const *model,
35812 struct op_msrs const * const msrs);
35813 #endif
35814-};
35815+} __do_const;
35816
35817 struct op_counter_config;
35818
35819diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
35820index b9958c3..24229ab 100644
35821--- a/arch/x86/pci/intel_mid_pci.c
35822+++ b/arch/x86/pci/intel_mid_pci.c
35823@@ -250,7 +250,7 @@ int __init intel_mid_pci_init(void)
35824 pci_mmcfg_late_init();
35825 pcibios_enable_irq = intel_mid_pci_irq_enable;
35826 pcibios_disable_irq = intel_mid_pci_irq_disable;
35827- pci_root_ops = intel_mid_pci_ops;
35828+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
35829 pci_soc_mode = 1;
35830 /* Continue with standard init */
35831 return 1;
35832diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
35833index eb500c2..eab9e70 100644
35834--- a/arch/x86/pci/irq.c
35835+++ b/arch/x86/pci/irq.c
35836@@ -51,7 +51,7 @@ struct irq_router {
35837 struct irq_router_handler {
35838 u16 vendor;
35839 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
35840-};
35841+} __do_const;
35842
35843 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
35844 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
35845@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
35846 return 0;
35847 }
35848
35849-static __initdata struct irq_router_handler pirq_routers[] = {
35850+static __initconst const struct irq_router_handler pirq_routers[] = {
35851 { PCI_VENDOR_ID_INTEL, intel_router_probe },
35852 { PCI_VENDOR_ID_AL, ali_router_probe },
35853 { PCI_VENDOR_ID_ITE, ite_router_probe },
35854@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
35855 static void __init pirq_find_router(struct irq_router *r)
35856 {
35857 struct irq_routing_table *rt = pirq_table;
35858- struct irq_router_handler *h;
35859+ const struct irq_router_handler *h;
35860
35861 #ifdef CONFIG_PCI_BIOS
35862 if (!rt->signature) {
35863@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
35864 return 0;
35865 }
35866
35867-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
35868+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
35869 {
35870 .callback = fix_broken_hp_bios_irq9,
35871 .ident = "HP Pavilion N5400 Series Laptop",
35872diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
35873index c77b24a..c979855 100644
35874--- a/arch/x86/pci/pcbios.c
35875+++ b/arch/x86/pci/pcbios.c
35876@@ -79,7 +79,7 @@ union bios32 {
35877 static struct {
35878 unsigned long address;
35879 unsigned short segment;
35880-} bios32_indirect = { 0, __KERNEL_CS };
35881+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
35882
35883 /*
35884 * Returns the entry point for the given service, NULL on error
35885@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
35886 unsigned long length; /* %ecx */
35887 unsigned long entry; /* %edx */
35888 unsigned long flags;
35889+ struct desc_struct d, *gdt;
35890
35891 local_irq_save(flags);
35892- __asm__("lcall *(%%edi); cld"
35893+
35894+ gdt = get_cpu_gdt_table(smp_processor_id());
35895+
35896+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
35897+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35898+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
35899+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35900+
35901+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
35902 : "=a" (return_code),
35903 "=b" (address),
35904 "=c" (length),
35905 "=d" (entry)
35906 : "0" (service),
35907 "1" (0),
35908- "D" (&bios32_indirect));
35909+ "D" (&bios32_indirect),
35910+ "r"(__PCIBIOS_DS)
35911+ : "memory");
35912+
35913+ pax_open_kernel();
35914+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
35915+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
35916+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
35917+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
35918+ pax_close_kernel();
35919+
35920 local_irq_restore(flags);
35921
35922 switch (return_code) {
35923- case 0:
35924- return address + entry;
35925- case 0x80: /* Not present */
35926- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35927- return 0;
35928- default: /* Shouldn't happen */
35929- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35930- service, return_code);
35931+ case 0: {
35932+ int cpu;
35933+ unsigned char flags;
35934+
35935+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
35936+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
35937+ printk(KERN_WARNING "bios32_service: not valid\n");
35938 return 0;
35939+ }
35940+ address = address + PAGE_OFFSET;
35941+ length += 16UL; /* some BIOSs underreport this... */
35942+ flags = 4;
35943+ if (length >= 64*1024*1024) {
35944+ length >>= PAGE_SHIFT;
35945+ flags |= 8;
35946+ }
35947+
35948+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
35949+ gdt = get_cpu_gdt_table(cpu);
35950+ pack_descriptor(&d, address, length, 0x9b, flags);
35951+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35952+ pack_descriptor(&d, address, length, 0x93, flags);
35953+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35954+ }
35955+ return entry;
35956+ }
35957+ case 0x80: /* Not present */
35958+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35959+ return 0;
35960+ default: /* Shouldn't happen */
35961+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35962+ service, return_code);
35963+ return 0;
35964 }
35965 }
35966
35967 static struct {
35968 unsigned long address;
35969 unsigned short segment;
35970-} pci_indirect = { 0, __KERNEL_CS };
35971+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
35972
35973-static int pci_bios_present;
35974+static int pci_bios_present __read_only;
35975
35976 static int check_pcibios(void)
35977 {
35978@@ -131,11 +174,13 @@ static int check_pcibios(void)
35979 unsigned long flags, pcibios_entry;
35980
35981 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
35982- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
35983+ pci_indirect.address = pcibios_entry;
35984
35985 local_irq_save(flags);
35986- __asm__(
35987- "lcall *(%%edi); cld\n\t"
35988+ __asm__("movw %w6, %%ds\n\t"
35989+ "lcall *%%ss:(%%edi); cld\n\t"
35990+ "push %%ss\n\t"
35991+ "pop %%ds\n\t"
35992 "jc 1f\n\t"
35993 "xor %%ah, %%ah\n"
35994 "1:"
35995@@ -144,7 +189,8 @@ static int check_pcibios(void)
35996 "=b" (ebx),
35997 "=c" (ecx)
35998 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
35999- "D" (&pci_indirect)
36000+ "D" (&pci_indirect),
36001+ "r" (__PCIBIOS_DS)
36002 : "memory");
36003 local_irq_restore(flags);
36004
36005@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36006
36007 switch (len) {
36008 case 1:
36009- __asm__("lcall *(%%esi); cld\n\t"
36010+ __asm__("movw %w6, %%ds\n\t"
36011+ "lcall *%%ss:(%%esi); cld\n\t"
36012+ "push %%ss\n\t"
36013+ "pop %%ds\n\t"
36014 "jc 1f\n\t"
36015 "xor %%ah, %%ah\n"
36016 "1:"
36017@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36018 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36019 "b" (bx),
36020 "D" ((long)reg),
36021- "S" (&pci_indirect));
36022+ "S" (&pci_indirect),
36023+ "r" (__PCIBIOS_DS));
36024 /*
36025 * Zero-extend the result beyond 8 bits, do not trust the
36026 * BIOS having done it:
36027@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36028 *value &= 0xff;
36029 break;
36030 case 2:
36031- __asm__("lcall *(%%esi); cld\n\t"
36032+ __asm__("movw %w6, %%ds\n\t"
36033+ "lcall *%%ss:(%%esi); cld\n\t"
36034+ "push %%ss\n\t"
36035+ "pop %%ds\n\t"
36036 "jc 1f\n\t"
36037 "xor %%ah, %%ah\n"
36038 "1:"
36039@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36040 : "1" (PCIBIOS_READ_CONFIG_WORD),
36041 "b" (bx),
36042 "D" ((long)reg),
36043- "S" (&pci_indirect));
36044+ "S" (&pci_indirect),
36045+ "r" (__PCIBIOS_DS));
36046 /*
36047 * Zero-extend the result beyond 16 bits, do not trust the
36048 * BIOS having done it:
36049@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36050 *value &= 0xffff;
36051 break;
36052 case 4:
36053- __asm__("lcall *(%%esi); cld\n\t"
36054+ __asm__("movw %w6, %%ds\n\t"
36055+ "lcall *%%ss:(%%esi); cld\n\t"
36056+ "push %%ss\n\t"
36057+ "pop %%ds\n\t"
36058 "jc 1f\n\t"
36059 "xor %%ah, %%ah\n"
36060 "1:"
36061@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36062 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36063 "b" (bx),
36064 "D" ((long)reg),
36065- "S" (&pci_indirect));
36066+ "S" (&pci_indirect),
36067+ "r" (__PCIBIOS_DS));
36068 break;
36069 }
36070
36071@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36072
36073 switch (len) {
36074 case 1:
36075- __asm__("lcall *(%%esi); cld\n\t"
36076+ __asm__("movw %w6, %%ds\n\t"
36077+ "lcall *%%ss:(%%esi); cld\n\t"
36078+ "push %%ss\n\t"
36079+ "pop %%ds\n\t"
36080 "jc 1f\n\t"
36081 "xor %%ah, %%ah\n"
36082 "1:"
36083@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36084 "c" (value),
36085 "b" (bx),
36086 "D" ((long)reg),
36087- "S" (&pci_indirect));
36088+ "S" (&pci_indirect),
36089+ "r" (__PCIBIOS_DS));
36090 break;
36091 case 2:
36092- __asm__("lcall *(%%esi); cld\n\t"
36093+ __asm__("movw %w6, %%ds\n\t"
36094+ "lcall *%%ss:(%%esi); cld\n\t"
36095+ "push %%ss\n\t"
36096+ "pop %%ds\n\t"
36097 "jc 1f\n\t"
36098 "xor %%ah, %%ah\n"
36099 "1:"
36100@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36101 "c" (value),
36102 "b" (bx),
36103 "D" ((long)reg),
36104- "S" (&pci_indirect));
36105+ "S" (&pci_indirect),
36106+ "r" (__PCIBIOS_DS));
36107 break;
36108 case 4:
36109- __asm__("lcall *(%%esi); cld\n\t"
36110+ __asm__("movw %w6, %%ds\n\t"
36111+ "lcall *%%ss:(%%esi); cld\n\t"
36112+ "push %%ss\n\t"
36113+ "pop %%ds\n\t"
36114 "jc 1f\n\t"
36115 "xor %%ah, %%ah\n"
36116 "1:"
36117@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36118 "c" (value),
36119 "b" (bx),
36120 "D" ((long)reg),
36121- "S" (&pci_indirect));
36122+ "S" (&pci_indirect),
36123+ "r" (__PCIBIOS_DS));
36124 break;
36125 }
36126
36127@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36128
36129 DBG("PCI: Fetching IRQ routing table... ");
36130 __asm__("push %%es\n\t"
36131+ "movw %w8, %%ds\n\t"
36132 "push %%ds\n\t"
36133 "pop %%es\n\t"
36134- "lcall *(%%esi); cld\n\t"
36135+ "lcall *%%ss:(%%esi); cld\n\t"
36136 "pop %%es\n\t"
36137+ "push %%ss\n\t"
36138+ "pop %%ds\n"
36139 "jc 1f\n\t"
36140 "xor %%ah, %%ah\n"
36141 "1:"
36142@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36143 "1" (0),
36144 "D" ((long) &opt),
36145 "S" (&pci_indirect),
36146- "m" (opt)
36147+ "m" (opt),
36148+ "r" (__PCIBIOS_DS)
36149 : "memory");
36150 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36151 if (ret & 0xff00)
36152@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36153 {
36154 int ret;
36155
36156- __asm__("lcall *(%%esi); cld\n\t"
36157+ __asm__("movw %w5, %%ds\n\t"
36158+ "lcall *%%ss:(%%esi); cld\n\t"
36159+ "push %%ss\n\t"
36160+ "pop %%ds\n"
36161 "jc 1f\n\t"
36162 "xor %%ah, %%ah\n"
36163 "1:"
36164@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36165 : "0" (PCIBIOS_SET_PCI_HW_INT),
36166 "b" ((dev->bus->number << 8) | dev->devfn),
36167 "c" ((irq << 8) | (pin + 10)),
36168- "S" (&pci_indirect));
36169+ "S" (&pci_indirect),
36170+ "r" (__PCIBIOS_DS));
36171 return !(ret & 0xff00);
36172 }
36173 EXPORT_SYMBOL(pcibios_set_irq_routing);
36174diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36175index 9ee3491..872192f 100644
36176--- a/arch/x86/platform/efi/efi_32.c
36177+++ b/arch/x86/platform/efi/efi_32.c
36178@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36179 {
36180 struct desc_ptr gdt_descr;
36181
36182+#ifdef CONFIG_PAX_KERNEXEC
36183+ struct desc_struct d;
36184+#endif
36185+
36186 local_irq_save(efi_rt_eflags);
36187
36188 load_cr3(initial_page_table);
36189 __flush_tlb_all();
36190
36191+#ifdef CONFIG_PAX_KERNEXEC
36192+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36193+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36194+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36195+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36196+#endif
36197+
36198 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36199 gdt_descr.size = GDT_SIZE - 1;
36200 load_gdt(&gdt_descr);
36201@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36202 {
36203 struct desc_ptr gdt_descr;
36204
36205+#ifdef CONFIG_PAX_KERNEXEC
36206+ struct desc_struct d;
36207+
36208+ memset(&d, 0, sizeof d);
36209+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36210+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36211+#endif
36212+
36213 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36214 gdt_descr.size = GDT_SIZE - 1;
36215 load_gdt(&gdt_descr);
36216
36217+#ifdef CONFIG_PAX_PER_CPU_PGD
36218+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36219+#else
36220 load_cr3(swapper_pg_dir);
36221+#endif
36222+
36223 __flush_tlb_all();
36224
36225 local_irq_restore(efi_rt_eflags);
36226diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36227index 290d397..e09d270 100644
36228--- a/arch/x86/platform/efi/efi_64.c
36229+++ b/arch/x86/platform/efi/efi_64.c
36230@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36231 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36232 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36233 }
36234+
36235+#ifdef CONFIG_PAX_PER_CPU_PGD
36236+ load_cr3(swapper_pg_dir);
36237+#endif
36238+
36239 __flush_tlb_all();
36240 }
36241
36242@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36243 for (pgd = 0; pgd < n_pgds; pgd++)
36244 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36245 kfree(save_pgd);
36246+
36247+#ifdef CONFIG_PAX_PER_CPU_PGD
36248+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36249+#endif
36250+
36251 __flush_tlb_all();
36252 local_irq_restore(efi_flags);
36253 early_code_mapping_set_exec(0);
36254@@ -146,8 +156,23 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
36255 unsigned npages;
36256 pgd_t *pgd;
36257
36258- if (efi_enabled(EFI_OLD_MEMMAP))
36259+ if (efi_enabled(EFI_OLD_MEMMAP)) {
36260+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
36261+ * able to execute the EFI services.
36262+ */
36263+ if (__supported_pte_mask & _PAGE_NX) {
36264+ unsigned long addr = (unsigned long) __va(0);
36265+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
36266+
36267+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
36268+#ifdef CONFIG_PAX_PER_CPU_PGD
36269+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
36270+#endif
36271+ set_pgd(pgd_offset_k(addr), pe);
36272+ }
36273+
36274 return 0;
36275+ }
36276
36277 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
36278 pgd = __va(efi_scratch.efi_pgt);
36279diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36280index fbe66e6..eae5e38 100644
36281--- a/arch/x86/platform/efi/efi_stub_32.S
36282+++ b/arch/x86/platform/efi/efi_stub_32.S
36283@@ -6,7 +6,9 @@
36284 */
36285
36286 #include <linux/linkage.h>
36287+#include <linux/init.h>
36288 #include <asm/page_types.h>
36289+#include <asm/segment.h>
36290
36291 /*
36292 * efi_call_phys(void *, ...) is a function with variable parameters.
36293@@ -20,7 +22,7 @@
36294 * service functions will comply with gcc calling convention, too.
36295 */
36296
36297-.text
36298+__INIT
36299 ENTRY(efi_call_phys)
36300 /*
36301 * 0. The function can only be called in Linux kernel. So CS has been
36302@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36303 * The mapping of lower virtual memory has been created in prelog and
36304 * epilog.
36305 */
36306- movl $1f, %edx
36307- subl $__PAGE_OFFSET, %edx
36308- jmp *%edx
36309+#ifdef CONFIG_PAX_KERNEXEC
36310+ movl $(__KERNEXEC_EFI_DS), %edx
36311+ mov %edx, %ds
36312+ mov %edx, %es
36313+ mov %edx, %ss
36314+ addl $2f,(1f)
36315+ ljmp *(1f)
36316+
36317+__INITDATA
36318+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36319+.previous
36320+
36321+2:
36322+ subl $2b,(1b)
36323+#else
36324+ jmp 1f-__PAGE_OFFSET
36325 1:
36326+#endif
36327
36328 /*
36329 * 2. Now on the top of stack is the return
36330@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36331 * parameter 2, ..., param n. To make things easy, we save the return
36332 * address of efi_call_phys in a global variable.
36333 */
36334- popl %edx
36335- movl %edx, saved_return_addr
36336- /* get the function pointer into ECX*/
36337- popl %ecx
36338- movl %ecx, efi_rt_function_ptr
36339- movl $2f, %edx
36340- subl $__PAGE_OFFSET, %edx
36341- pushl %edx
36342+ popl (saved_return_addr)
36343+ popl (efi_rt_function_ptr)
36344
36345 /*
36346 * 3. Clear PG bit in %CR0.
36347@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36348 /*
36349 * 5. Call the physical function.
36350 */
36351- jmp *%ecx
36352+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36353
36354-2:
36355 /*
36356 * 6. After EFI runtime service returns, control will return to
36357 * following instruction. We'd better readjust stack pointer first.
36358@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36359 movl %cr0, %edx
36360 orl $0x80000000, %edx
36361 movl %edx, %cr0
36362- jmp 1f
36363-1:
36364+
36365 /*
36366 * 8. Now restore the virtual mode from flat mode by
36367 * adding EIP with PAGE_OFFSET.
36368 */
36369- movl $1f, %edx
36370- jmp *%edx
36371+#ifdef CONFIG_PAX_KERNEXEC
36372+ movl $(__KERNEL_DS), %edx
36373+ mov %edx, %ds
36374+ mov %edx, %es
36375+ mov %edx, %ss
36376+ ljmp $(__KERNEL_CS),$1f
36377+#else
36378+ jmp 1f+__PAGE_OFFSET
36379+#endif
36380 1:
36381
36382 /*
36383 * 9. Balance the stack. And because EAX contain the return value,
36384 * we'd better not clobber it.
36385 */
36386- leal efi_rt_function_ptr, %edx
36387- movl (%edx), %ecx
36388- pushl %ecx
36389+ pushl (efi_rt_function_ptr)
36390
36391 /*
36392- * 10. Push the saved return address onto the stack and return.
36393+ * 10. Return to the saved return address.
36394 */
36395- leal saved_return_addr, %edx
36396- movl (%edx), %ecx
36397- pushl %ecx
36398- ret
36399+ jmpl *(saved_return_addr)
36400 ENDPROC(efi_call_phys)
36401 .previous
36402
36403-.data
36404+__INITDATA
36405 saved_return_addr:
36406 .long 0
36407 efi_rt_function_ptr:
36408diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36409index 5fcda72..cd4dc41 100644
36410--- a/arch/x86/platform/efi/efi_stub_64.S
36411+++ b/arch/x86/platform/efi/efi_stub_64.S
36412@@ -11,6 +11,7 @@
36413 #include <asm/msr.h>
36414 #include <asm/processor-flags.h>
36415 #include <asm/page_types.h>
36416+#include <asm/alternative-asm.h>
36417
36418 #define SAVE_XMM \
36419 mov %rsp, %rax; \
36420@@ -88,6 +89,7 @@ ENTRY(efi_call)
36421 RESTORE_PGT
36422 addq $48, %rsp
36423 RESTORE_XMM
36424+ pax_force_retaddr 0, 1
36425 ret
36426 ENDPROC(efi_call)
36427
36428@@ -245,8 +247,8 @@ efi_gdt64:
36429 .long 0 /* Filled out by user */
36430 .word 0
36431 .quad 0x0000000000000000 /* NULL descriptor */
36432- .quad 0x00af9a000000ffff /* __KERNEL_CS */
36433- .quad 0x00cf92000000ffff /* __KERNEL_DS */
36434+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
36435+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
36436 .quad 0x0080890000000000 /* TS descriptor */
36437 .quad 0x0000000000000000 /* TS continued */
36438 efi_gdt64_end:
36439diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36440index 1bbedc4..eb795b5 100644
36441--- a/arch/x86/platform/intel-mid/intel-mid.c
36442+++ b/arch/x86/platform/intel-mid/intel-mid.c
36443@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36444 {
36445 };
36446
36447-static void intel_mid_reboot(void)
36448+static void __noreturn intel_mid_reboot(void)
36449 {
36450 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36451+ BUG();
36452 }
36453
36454 static unsigned long __init intel_mid_calibrate_tsc(void)
36455diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36456index 46aa25c..59a68ed 100644
36457--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36458+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36459@@ -10,10 +10,9 @@
36460 */
36461
36462
36463-/* __attribute__((weak)) makes these declarations overridable */
36464 /* For every CPU addition a new get_<cpuname>_ops interface needs
36465 * to be added.
36466 */
36467-extern void *get_penwell_ops(void) __attribute__((weak));
36468-extern void *get_cloverview_ops(void) __attribute__((weak));
36469-extern void *get_tangier_ops(void) __attribute__((weak));
36470+extern const void *get_penwell_ops(void);
36471+extern const void *get_cloverview_ops(void);
36472+extern const void *get_tangier_ops(void);
36473diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
36474index 23381d2..8ddc10e 100644
36475--- a/arch/x86/platform/intel-mid/mfld.c
36476+++ b/arch/x86/platform/intel-mid/mfld.c
36477@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
36478 pm_power_off = mfld_power_off;
36479 }
36480
36481-void *get_penwell_ops(void)
36482+const void *get_penwell_ops(void)
36483 {
36484 return &penwell_ops;
36485 }
36486
36487-void *get_cloverview_ops(void)
36488+const void *get_cloverview_ops(void)
36489 {
36490 return &penwell_ops;
36491 }
36492diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
36493index aaca917..66eadbc 100644
36494--- a/arch/x86/platform/intel-mid/mrfl.c
36495+++ b/arch/x86/platform/intel-mid/mrfl.c
36496@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
36497 .arch_setup = tangier_arch_setup,
36498 };
36499
36500-void *get_tangier_ops(void)
36501+const void *get_tangier_ops(void)
36502 {
36503 return &tangier_ops;
36504 }
36505diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36506index d6ee929..3637cb5 100644
36507--- a/arch/x86/platform/olpc/olpc_dt.c
36508+++ b/arch/x86/platform/olpc/olpc_dt.c
36509@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36510 return res;
36511 }
36512
36513-static struct of_pdt_ops prom_olpc_ops __initdata = {
36514+static struct of_pdt_ops prom_olpc_ops __initconst = {
36515 .nextprop = olpc_dt_nextprop,
36516 .getproplen = olpc_dt_getproplen,
36517 .getproperty = olpc_dt_getproperty,
36518diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36519index 6ec7910..ecdbb11 100644
36520--- a/arch/x86/power/cpu.c
36521+++ b/arch/x86/power/cpu.c
36522@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36523 static void fix_processor_context(void)
36524 {
36525 int cpu = smp_processor_id();
36526- struct tss_struct *t = &per_cpu(init_tss, cpu);
36527-#ifdef CONFIG_X86_64
36528- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36529- tss_desc tss;
36530-#endif
36531+ struct tss_struct *t = init_tss + cpu;
36532+
36533 set_tss_desc(cpu, t); /*
36534 * This just modifies memory; should not be
36535 * necessary. But... This is necessary, because
36536@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36537 */
36538
36539 #ifdef CONFIG_X86_64
36540- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36541- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36542- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36543-
36544 syscall_init(); /* This sets MSR_*STAR and related */
36545 #endif
36546 load_TR_desc(); /* This does ltr */
36547diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36548index bad628a..a102610 100644
36549--- a/arch/x86/realmode/init.c
36550+++ b/arch/x86/realmode/init.c
36551@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36552 __va(real_mode_header->trampoline_header);
36553
36554 #ifdef CONFIG_X86_32
36555- trampoline_header->start = __pa_symbol(startup_32_smp);
36556+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36557+
36558+#ifdef CONFIG_PAX_KERNEXEC
36559+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36560+#endif
36561+
36562+ trampoline_header->boot_cs = __BOOT_CS;
36563 trampoline_header->gdt_limit = __BOOT_DS + 7;
36564 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36565 #else
36566@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36567 *trampoline_cr4_features = read_cr4();
36568
36569 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36570- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36571+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36572 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36573 #endif
36574 }
36575diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36576index 7c0d7be..d24dc88 100644
36577--- a/arch/x86/realmode/rm/Makefile
36578+++ b/arch/x86/realmode/rm/Makefile
36579@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36580
36581 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36582 -I$(srctree)/arch/x86/boot
36583+ifdef CONSTIFY_PLUGIN
36584+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36585+endif
36586 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36587 GCOV_PROFILE := n
36588diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36589index a28221d..93c40f1 100644
36590--- a/arch/x86/realmode/rm/header.S
36591+++ b/arch/x86/realmode/rm/header.S
36592@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36593 #endif
36594 /* APM/BIOS reboot */
36595 .long pa_machine_real_restart_asm
36596-#ifdef CONFIG_X86_64
36597+#ifdef CONFIG_X86_32
36598+ .long __KERNEL_CS
36599+#else
36600 .long __KERNEL32_CS
36601 #endif
36602 END(real_mode_header)
36603diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
36604index 48ddd76..c26749f 100644
36605--- a/arch/x86/realmode/rm/trampoline_32.S
36606+++ b/arch/x86/realmode/rm/trampoline_32.S
36607@@ -24,6 +24,12 @@
36608 #include <asm/page_types.h>
36609 #include "realmode.h"
36610
36611+#ifdef CONFIG_PAX_KERNEXEC
36612+#define ta(X) (X)
36613+#else
36614+#define ta(X) (pa_ ## X)
36615+#endif
36616+
36617 .text
36618 .code16
36619
36620@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
36621
36622 cli # We should be safe anyway
36623
36624- movl tr_start, %eax # where we need to go
36625-
36626 movl $0xA5A5A5A5, trampoline_status
36627 # write marker for master knows we're running
36628
36629@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
36630 movw $1, %dx # protected mode (PE) bit
36631 lmsw %dx # into protected mode
36632
36633- ljmpl $__BOOT_CS, $pa_startup_32
36634+ ljmpl *(trampoline_header)
36635
36636 .section ".text32","ax"
36637 .code32
36638@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
36639 .balign 8
36640 GLOBAL(trampoline_header)
36641 tr_start: .space 4
36642- tr_gdt_pad: .space 2
36643+ tr_boot_cs: .space 2
36644 tr_gdt: .space 6
36645 END(trampoline_header)
36646
36647diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
36648index dac7b20..72dbaca 100644
36649--- a/arch/x86/realmode/rm/trampoline_64.S
36650+++ b/arch/x86/realmode/rm/trampoline_64.S
36651@@ -93,6 +93,7 @@ ENTRY(startup_32)
36652 movl %edx, %gs
36653
36654 movl pa_tr_cr4, %eax
36655+ andl $~X86_CR4_PCIDE, %eax
36656 movl %eax, %cr4 # Enable PAE mode
36657
36658 # Setup trampoline 4 level pagetables
36659@@ -106,7 +107,7 @@ ENTRY(startup_32)
36660 wrmsr
36661
36662 # Enable paging and in turn activate Long Mode
36663- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
36664+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
36665 movl %eax, %cr0
36666
36667 /*
36668diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
36669index 9e7e147..25a4158 100644
36670--- a/arch/x86/realmode/rm/wakeup_asm.S
36671+++ b/arch/x86/realmode/rm/wakeup_asm.S
36672@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
36673 lgdtl pmode_gdt
36674
36675 /* This really couldn't... */
36676- movl pmode_entry, %eax
36677 movl pmode_cr0, %ecx
36678 movl %ecx, %cr0
36679- ljmpl $__KERNEL_CS, $pa_startup_32
36680- /* -> jmp *%eax in trampoline_32.S */
36681+
36682+ ljmpl *pmode_entry
36683 #else
36684 jmp trampoline_start
36685 #endif
36686diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
36687index 604a37e..e49702a 100644
36688--- a/arch/x86/tools/Makefile
36689+++ b/arch/x86/tools/Makefile
36690@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
36691
36692 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
36693
36694-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
36695+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
36696 hostprogs-y += relocs
36697 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
36698 PHONY += relocs
36699diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
36700index bbb1d22..e505211 100644
36701--- a/arch/x86/tools/relocs.c
36702+++ b/arch/x86/tools/relocs.c
36703@@ -1,5 +1,7 @@
36704 /* This is included from relocs_32/64.c */
36705
36706+#include "../../../include/generated/autoconf.h"
36707+
36708 #define ElfW(type) _ElfW(ELF_BITS, type)
36709 #define _ElfW(bits, type) __ElfW(bits, type)
36710 #define __ElfW(bits, type) Elf##bits##_##type
36711@@ -11,6 +13,7 @@
36712 #define Elf_Sym ElfW(Sym)
36713
36714 static Elf_Ehdr ehdr;
36715+static Elf_Phdr *phdr;
36716
36717 struct relocs {
36718 uint32_t *offset;
36719@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
36720 }
36721 }
36722
36723+static void read_phdrs(FILE *fp)
36724+{
36725+ unsigned int i;
36726+
36727+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
36728+ if (!phdr) {
36729+ die("Unable to allocate %d program headers\n",
36730+ ehdr.e_phnum);
36731+ }
36732+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
36733+ die("Seek to %d failed: %s\n",
36734+ ehdr.e_phoff, strerror(errno));
36735+ }
36736+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
36737+ die("Cannot read ELF program headers: %s\n",
36738+ strerror(errno));
36739+ }
36740+ for(i = 0; i < ehdr.e_phnum; i++) {
36741+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
36742+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
36743+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
36744+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
36745+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
36746+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
36747+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
36748+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
36749+ }
36750+
36751+}
36752+
36753 static void read_shdrs(FILE *fp)
36754 {
36755- int i;
36756+ unsigned int i;
36757 Elf_Shdr shdr;
36758
36759 secs = calloc(ehdr.e_shnum, sizeof(struct section));
36760@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
36761
36762 static void read_strtabs(FILE *fp)
36763 {
36764- int i;
36765+ unsigned int i;
36766 for (i = 0; i < ehdr.e_shnum; i++) {
36767 struct section *sec = &secs[i];
36768 if (sec->shdr.sh_type != SHT_STRTAB) {
36769@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
36770
36771 static void read_symtabs(FILE *fp)
36772 {
36773- int i,j;
36774+ unsigned int i,j;
36775 for (i = 0; i < ehdr.e_shnum; i++) {
36776 struct section *sec = &secs[i];
36777 if (sec->shdr.sh_type != SHT_SYMTAB) {
36778@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
36779 }
36780
36781
36782-static void read_relocs(FILE *fp)
36783+static void read_relocs(FILE *fp, int use_real_mode)
36784 {
36785- int i,j;
36786+ unsigned int i,j;
36787+ uint32_t base;
36788+
36789 for (i = 0; i < ehdr.e_shnum; i++) {
36790 struct section *sec = &secs[i];
36791 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36792@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
36793 die("Cannot read symbol table: %s\n",
36794 strerror(errno));
36795 }
36796+ base = 0;
36797+
36798+#ifdef CONFIG_X86_32
36799+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
36800+ if (phdr[j].p_type != PT_LOAD )
36801+ continue;
36802+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
36803+ continue;
36804+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
36805+ break;
36806+ }
36807+#endif
36808+
36809 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
36810 Elf_Rel *rel = &sec->reltab[j];
36811- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
36812+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
36813 rel->r_info = elf_xword_to_cpu(rel->r_info);
36814 #if (SHT_REL_TYPE == SHT_RELA)
36815 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
36816@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
36817
36818 static void print_absolute_symbols(void)
36819 {
36820- int i;
36821+ unsigned int i;
36822 const char *format;
36823
36824 if (ELF_BITS == 64)
36825@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
36826 for (i = 0; i < ehdr.e_shnum; i++) {
36827 struct section *sec = &secs[i];
36828 char *sym_strtab;
36829- int j;
36830+ unsigned int j;
36831
36832 if (sec->shdr.sh_type != SHT_SYMTAB) {
36833 continue;
36834@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
36835
36836 static void print_absolute_relocs(void)
36837 {
36838- int i, printed = 0;
36839+ unsigned int i, printed = 0;
36840 const char *format;
36841
36842 if (ELF_BITS == 64)
36843@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
36844 struct section *sec_applies, *sec_symtab;
36845 char *sym_strtab;
36846 Elf_Sym *sh_symtab;
36847- int j;
36848+ unsigned int j;
36849 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36850 continue;
36851 }
36852@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
36853 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
36854 Elf_Sym *sym, const char *symname))
36855 {
36856- int i;
36857+ unsigned int i;
36858 /* Walk through the relocations */
36859 for (i = 0; i < ehdr.e_shnum; i++) {
36860 char *sym_strtab;
36861 Elf_Sym *sh_symtab;
36862 struct section *sec_applies, *sec_symtab;
36863- int j;
36864+ unsigned int j;
36865 struct section *sec = &secs[i];
36866
36867 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36868@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36869 {
36870 unsigned r_type = ELF32_R_TYPE(rel->r_info);
36871 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
36872+ char *sym_strtab = sec->link->link->strtab;
36873+
36874+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
36875+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
36876+ return 0;
36877+
36878+#ifdef CONFIG_PAX_KERNEXEC
36879+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
36880+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
36881+ return 0;
36882+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
36883+ return 0;
36884+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
36885+ return 0;
36886+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
36887+ return 0;
36888+#endif
36889
36890 switch (r_type) {
36891 case R_386_NONE:
36892@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
36893
36894 static void emit_relocs(int as_text, int use_real_mode)
36895 {
36896- int i;
36897+ unsigned int i;
36898 int (*write_reloc)(uint32_t, FILE *) = write32;
36899 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36900 const char *symname);
36901@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
36902 {
36903 regex_init(use_real_mode);
36904 read_ehdr(fp);
36905+ read_phdrs(fp);
36906 read_shdrs(fp);
36907 read_strtabs(fp);
36908 read_symtabs(fp);
36909- read_relocs(fp);
36910+ read_relocs(fp, use_real_mode);
36911 if (ELF_BITS == 64)
36912 percpu_init();
36913 if (show_absolute_syms) {
36914diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
36915index f40281e..92728c9 100644
36916--- a/arch/x86/um/mem_32.c
36917+++ b/arch/x86/um/mem_32.c
36918@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
36919 gate_vma.vm_start = FIXADDR_USER_START;
36920 gate_vma.vm_end = FIXADDR_USER_END;
36921 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
36922- gate_vma.vm_page_prot = __P101;
36923+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
36924
36925 return 0;
36926 }
36927diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
36928index 80ffa5b..a33bd15 100644
36929--- a/arch/x86/um/tls_32.c
36930+++ b/arch/x86/um/tls_32.c
36931@@ -260,7 +260,7 @@ out:
36932 if (unlikely(task == current &&
36933 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
36934 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
36935- "without flushed TLS.", current->pid);
36936+ "without flushed TLS.", task_pid_nr(current));
36937 }
36938
36939 return 0;
36940diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
36941index 5a4affe..9e2d522 100644
36942--- a/arch/x86/vdso/Makefile
36943+++ b/arch/x86/vdso/Makefile
36944@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
36945 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
36946 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
36947
36948-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36949+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36950 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
36951 GCOV_PROFILE := n
36952
36953diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
36954index e904c27..b9eaa03 100644
36955--- a/arch/x86/vdso/vdso32-setup.c
36956+++ b/arch/x86/vdso/vdso32-setup.c
36957@@ -14,6 +14,7 @@
36958 #include <asm/cpufeature.h>
36959 #include <asm/processor.h>
36960 #include <asm/vdso.h>
36961+#include <asm/mman.h>
36962
36963 #ifdef CONFIG_COMPAT_VDSO
36964 #define VDSO_DEFAULT 0
36965diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
36966index 970463b..da82d3e 100644
36967--- a/arch/x86/vdso/vma.c
36968+++ b/arch/x86/vdso/vma.c
36969@@ -16,10 +16,9 @@
36970 #include <asm/vdso.h>
36971 #include <asm/page.h>
36972 #include <asm/hpet.h>
36973+#include <asm/mman.h>
36974
36975 #if defined(CONFIG_X86_64)
36976-unsigned int __read_mostly vdso64_enabled = 1;
36977-
36978 extern unsigned short vdso_sync_cpuid;
36979 #endif
36980
36981@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36982 .pages = no_pages,
36983 };
36984
36985+#ifdef CONFIG_PAX_RANDMMAP
36986+ if (mm->pax_flags & MF_PAX_RANDMMAP)
36987+ calculate_addr = false;
36988+#endif
36989+
36990 if (calculate_addr) {
36991 addr = vdso_addr(current->mm->start_stack,
36992 image->size - image->sym_vvar_start);
36993@@ -111,14 +115,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36994 down_write(&mm->mmap_sem);
36995
36996 addr = get_unmapped_area(NULL, addr,
36997- image->size - image->sym_vvar_start, 0, 0);
36998+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
36999 if (IS_ERR_VALUE(addr)) {
37000 ret = addr;
37001 goto up_fail;
37002 }
37003
37004 text_start = addr - image->sym_vvar_start;
37005- current->mm->context.vdso = (void __user *)text_start;
37006+ mm->context.vdso = text_start;
37007
37008 /*
37009 * MAYWRITE to allow gdb to COW and set breakpoints
37010@@ -163,15 +167,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37011 hpet_address >> PAGE_SHIFT,
37012 PAGE_SIZE,
37013 pgprot_noncached(PAGE_READONLY));
37014-
37015- if (ret)
37016- goto up_fail;
37017 }
37018 #endif
37019
37020 up_fail:
37021 if (ret)
37022- current->mm->context.vdso = NULL;
37023+ current->mm->context.vdso = 0;
37024
37025 up_write(&mm->mmap_sem);
37026 return ret;
37027@@ -191,8 +192,8 @@ static int load_vdso32(void)
37028
37029 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37030 current_thread_info()->sysenter_return =
37031- current->mm->context.vdso +
37032- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37033+ (void __force_user *)(current->mm->context.vdso +
37034+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37035
37036 return 0;
37037 }
37038@@ -201,9 +202,6 @@ static int load_vdso32(void)
37039 #ifdef CONFIG_X86_64
37040 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37041 {
37042- if (!vdso64_enabled)
37043- return 0;
37044-
37045 return map_vdso(&vdso_image_64, true);
37046 }
37047
37048@@ -212,12 +210,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37049 int uses_interp)
37050 {
37051 #ifdef CONFIG_X86_X32_ABI
37052- if (test_thread_flag(TIF_X32)) {
37053- if (!vdso64_enabled)
37054- return 0;
37055-
37056+ if (test_thread_flag(TIF_X32))
37057 return map_vdso(&vdso_image_x32, true);
37058- }
37059 #endif
37060
37061 return load_vdso32();
37062@@ -229,12 +223,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37063 return load_vdso32();
37064 }
37065 #endif
37066-
37067-#ifdef CONFIG_X86_64
37068-static __init int vdso_setup(char *s)
37069-{
37070- vdso64_enabled = simple_strtoul(s, NULL, 0);
37071- return 0;
37072-}
37073-__setup("vdso=", vdso_setup);
37074-#endif
37075diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37076index e88fda8..76ce7ce 100644
37077--- a/arch/x86/xen/Kconfig
37078+++ b/arch/x86/xen/Kconfig
37079@@ -9,6 +9,7 @@ config XEN
37080 select XEN_HAVE_PVMMU
37081 depends on X86_64 || (X86_32 && X86_PAE)
37082 depends on X86_TSC
37083+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37084 help
37085 This is the Linux Xen port. Enabling this will allow the
37086 kernel to boot in a paravirtualized environment under the
37087diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37088index c0cb11f..bed56ff 100644
37089--- a/arch/x86/xen/enlighten.c
37090+++ b/arch/x86/xen/enlighten.c
37091@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37092
37093 struct shared_info xen_dummy_shared_info;
37094
37095-void *xen_initial_gdt;
37096-
37097 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37098 __read_mostly int xen_have_vector_callback;
37099 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37100@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37101 {
37102 unsigned long va = dtr->address;
37103 unsigned int size = dtr->size + 1;
37104- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37105- unsigned long frames[pages];
37106+ unsigned long frames[65536 / PAGE_SIZE];
37107 int f;
37108
37109 /*
37110@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37111 {
37112 unsigned long va = dtr->address;
37113 unsigned int size = dtr->size + 1;
37114- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37115- unsigned long frames[pages];
37116+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37117 int f;
37118
37119 /*
37120@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37121 * 8-byte entries, or 16 4k pages..
37122 */
37123
37124- BUG_ON(size > 65536);
37125+ BUG_ON(size > GDT_SIZE);
37126 BUG_ON(va & ~PAGE_MASK);
37127
37128 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37129@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37130 return 0;
37131 }
37132
37133-static void set_xen_basic_apic_ops(void)
37134+static void __init set_xen_basic_apic_ops(void)
37135 {
37136 apic->read = xen_apic_read;
37137 apic->write = xen_apic_write;
37138@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37139 #endif
37140 };
37141
37142-static void xen_reboot(int reason)
37143+static __noreturn void xen_reboot(int reason)
37144 {
37145 struct sched_shutdown r = { .reason = reason };
37146
37147- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37148- BUG();
37149+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37150+ BUG();
37151 }
37152
37153-static void xen_restart(char *msg)
37154+static __noreturn void xen_restart(char *msg)
37155 {
37156 xen_reboot(SHUTDOWN_reboot);
37157 }
37158
37159-static void xen_emergency_restart(void)
37160+static __noreturn void xen_emergency_restart(void)
37161 {
37162 xen_reboot(SHUTDOWN_reboot);
37163 }
37164
37165-static void xen_machine_halt(void)
37166+static __noreturn void xen_machine_halt(void)
37167 {
37168 xen_reboot(SHUTDOWN_poweroff);
37169 }
37170
37171-static void xen_machine_power_off(void)
37172+static __noreturn void xen_machine_power_off(void)
37173 {
37174 if (pm_power_off)
37175 pm_power_off();
37176@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37177 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37178
37179 /* Work out if we support NX */
37180- x86_configure_nx();
37181+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37182+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37183+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37184+ unsigned l, h;
37185+
37186+ __supported_pte_mask |= _PAGE_NX;
37187+ rdmsr(MSR_EFER, l, h);
37188+ l |= EFER_NX;
37189+ wrmsr(MSR_EFER, l, h);
37190+ }
37191+#endif
37192
37193 /* Get mfn list */
37194 xen_build_dynamic_phys_to_machine();
37195@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37196
37197 machine_ops = xen_machine_ops;
37198
37199- /*
37200- * The only reliable way to retain the initial address of the
37201- * percpu gdt_page is to remember it here, so we can go and
37202- * mark it RW later, when the initial percpu area is freed.
37203- */
37204- xen_initial_gdt = &per_cpu(gdt_page, 0);
37205-
37206 xen_smp_init();
37207
37208 #ifdef CONFIG_ACPI_NUMA
37209diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37210index 16fb009..02b7801 100644
37211--- a/arch/x86/xen/mmu.c
37212+++ b/arch/x86/xen/mmu.c
37213@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37214 return val;
37215 }
37216
37217-static pteval_t pte_pfn_to_mfn(pteval_t val)
37218+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37219 {
37220 if (val & _PAGE_PRESENT) {
37221 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37222@@ -1904,7 +1904,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37223 * L3_k[511] -> level2_fixmap_pgt */
37224 convert_pfn_mfn(level3_kernel_pgt);
37225
37226+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37227+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37228+ convert_pfn_mfn(level3_vmemmap_pgt);
37229 /* L3_k[511][506] -> level1_fixmap_pgt */
37230+ /* L3_k[511][507] -> level1_vsyscall_pgt */
37231 convert_pfn_mfn(level2_fixmap_pgt);
37232 }
37233 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37234@@ -1929,11 +1933,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37235 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37236 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37237 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37238+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37239+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37240+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37241 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37242 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37243+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37244 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37245 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37246 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
37247+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
37248
37249 /* Pin down new L4 */
37250 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
37251@@ -2117,6 +2126,7 @@ static void __init xen_post_allocator_init(void)
37252 pv_mmu_ops.set_pud = xen_set_pud;
37253 #if PAGETABLE_LEVELS == 4
37254 pv_mmu_ops.set_pgd = xen_set_pgd;
37255+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37256 #endif
37257
37258 /* This will work as long as patching hasn't happened yet
37259@@ -2195,6 +2205,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37260 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37261 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37262 .set_pgd = xen_set_pgd_hyper,
37263+ .set_pgd_batched = xen_set_pgd_hyper,
37264
37265 .alloc_pud = xen_alloc_pmd_init,
37266 .release_pud = xen_release_pmd_init,
37267diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37268index 7005974..54fb05f 100644
37269--- a/arch/x86/xen/smp.c
37270+++ b/arch/x86/xen/smp.c
37271@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37272
37273 if (xen_pv_domain()) {
37274 if (!xen_feature(XENFEAT_writable_page_tables))
37275- /* We've switched to the "real" per-cpu gdt, so make
37276- * sure the old memory can be recycled. */
37277- make_lowmem_page_readwrite(xen_initial_gdt);
37278-
37279 #ifdef CONFIG_X86_32
37280 /*
37281 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37282 * expects __USER_DS
37283 */
37284- loadsegment(ds, __USER_DS);
37285- loadsegment(es, __USER_DS);
37286+ loadsegment(ds, __KERNEL_DS);
37287+ loadsegment(es, __KERNEL_DS);
37288 #endif
37289
37290 xen_filter_cpu_maps();
37291@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37292 #ifdef CONFIG_X86_32
37293 /* Note: PVH is not yet supported on x86_32. */
37294 ctxt->user_regs.fs = __KERNEL_PERCPU;
37295- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37296+ savesegment(gs, ctxt->user_regs.gs);
37297 #endif
37298 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37299
37300@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37301 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37302 ctxt->flags = VGCF_IN_KERNEL;
37303 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37304- ctxt->user_regs.ds = __USER_DS;
37305- ctxt->user_regs.es = __USER_DS;
37306+ ctxt->user_regs.ds = __KERNEL_DS;
37307+ ctxt->user_regs.es = __KERNEL_DS;
37308 ctxt->user_regs.ss = __KERNEL_DS;
37309
37310 xen_copy_trap_info(ctxt->trap_ctxt);
37311@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37312 int rc;
37313
37314 per_cpu(current_task, cpu) = idle;
37315+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37316 #ifdef CONFIG_X86_32
37317 irq_ctx_init(cpu);
37318 #else
37319 clear_tsk_thread_flag(idle, TIF_FORK);
37320 #endif
37321- per_cpu(kernel_stack, cpu) =
37322- (unsigned long)task_stack_page(idle) -
37323- KERNEL_STACK_OFFSET + THREAD_SIZE;
37324+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37325
37326 xen_setup_runstate_info(cpu);
37327 xen_setup_timer(cpu);
37328@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37329
37330 void __init xen_smp_init(void)
37331 {
37332- smp_ops = xen_smp_ops;
37333+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37334 xen_fill_possible_map();
37335 }
37336
37337diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37338index fd92a64..1f72641 100644
37339--- a/arch/x86/xen/xen-asm_32.S
37340+++ b/arch/x86/xen/xen-asm_32.S
37341@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37342 pushw %fs
37343 movl $(__KERNEL_PERCPU), %eax
37344 movl %eax, %fs
37345- movl %fs:xen_vcpu, %eax
37346+ mov PER_CPU_VAR(xen_vcpu), %eax
37347 POP_FS
37348 #else
37349 movl %ss:xen_vcpu, %eax
37350diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37351index 485b695..fda3e7c 100644
37352--- a/arch/x86/xen/xen-head.S
37353+++ b/arch/x86/xen/xen-head.S
37354@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37355 #ifdef CONFIG_X86_32
37356 mov %esi,xen_start_info
37357 mov $init_thread_union+THREAD_SIZE,%esp
37358+#ifdef CONFIG_SMP
37359+ movl $cpu_gdt_table,%edi
37360+ movl $__per_cpu_load,%eax
37361+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37362+ rorl $16,%eax
37363+ movb %al,__KERNEL_PERCPU + 4(%edi)
37364+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37365+ movl $__per_cpu_end - 1,%eax
37366+ subl $__per_cpu_start,%eax
37367+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37368+#endif
37369 #else
37370 mov %rsi,xen_start_info
37371 mov $init_thread_union+THREAD_SIZE,%rsp
37372diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37373index 28c7e0b..2acfec7 100644
37374--- a/arch/x86/xen/xen-ops.h
37375+++ b/arch/x86/xen/xen-ops.h
37376@@ -10,8 +10,6 @@
37377 extern const char xen_hypervisor_callback[];
37378 extern const char xen_failsafe_callback[];
37379
37380-extern void *xen_initial_gdt;
37381-
37382 struct trap_info;
37383 void xen_copy_trap_info(struct trap_info *traps);
37384
37385diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37386index 525bd3d..ef888b1 100644
37387--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37388+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37389@@ -119,9 +119,9 @@
37390 ----------------------------------------------------------------------*/
37391
37392 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37393-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37394 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37395 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37396+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37397
37398 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37399 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37400diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37401index 2f33760..835e50a 100644
37402--- a/arch/xtensa/variants/fsf/include/variant/core.h
37403+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37404@@ -11,6 +11,7 @@
37405 #ifndef _XTENSA_CORE_H
37406 #define _XTENSA_CORE_H
37407
37408+#include <linux/const.h>
37409
37410 /****************************************************************************
37411 Parameters Useful for Any Code, USER or PRIVILEGED
37412@@ -112,9 +113,9 @@
37413 ----------------------------------------------------------------------*/
37414
37415 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37416-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37417 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37418 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37419+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37420
37421 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37422 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37423diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37424index af00795..2bb8105 100644
37425--- a/arch/xtensa/variants/s6000/include/variant/core.h
37426+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37427@@ -11,6 +11,7 @@
37428 #ifndef _XTENSA_CORE_CONFIGURATION_H
37429 #define _XTENSA_CORE_CONFIGURATION_H
37430
37431+#include <linux/const.h>
37432
37433 /****************************************************************************
37434 Parameters Useful for Any Code, USER or PRIVILEGED
37435@@ -118,9 +119,9 @@
37436 ----------------------------------------------------------------------*/
37437
37438 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37439-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37440 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37441 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37442+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37443
37444 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37445 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
37446diff --git a/block/bio.c b/block/bio.c
37447index 3e6331d..f970433 100644
37448--- a/block/bio.c
37449+++ b/block/bio.c
37450@@ -1160,7 +1160,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37451 /*
37452 * Overflow, abort
37453 */
37454- if (end < start)
37455+ if (end < start || end - start > INT_MAX - nr_pages)
37456 return ERR_PTR(-EINVAL);
37457
37458 nr_pages += end - start;
37459@@ -1294,7 +1294,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37460 /*
37461 * Overflow, abort
37462 */
37463- if (end < start)
37464+ if (end < start || end - start > INT_MAX - nr_pages)
37465 return ERR_PTR(-EINVAL);
37466
37467 nr_pages += end - start;
37468@@ -1556,7 +1556,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37469 const int read = bio_data_dir(bio) == READ;
37470 struct bio_map_data *bmd = bio->bi_private;
37471 int i;
37472- char *p = bmd->sgvecs[0].iov_base;
37473+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37474
37475 bio_for_each_segment_all(bvec, bio, i) {
37476 char *addr = page_address(bvec->bv_page);
37477diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37478index e17da94..e01cce1 100644
37479--- a/block/blk-cgroup.c
37480+++ b/block/blk-cgroup.c
37481@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37482 static struct cgroup_subsys_state *
37483 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37484 {
37485- static atomic64_t id_seq = ATOMIC64_INIT(0);
37486+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37487 struct blkcg *blkcg;
37488
37489 if (!parent_css) {
37490@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37491
37492 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37493 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37494- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37495+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37496 done:
37497 spin_lock_init(&blkcg->lock);
37498 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
37499diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37500index 0736729..2ec3b48 100644
37501--- a/block/blk-iopoll.c
37502+++ b/block/blk-iopoll.c
37503@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37504 }
37505 EXPORT_SYMBOL(blk_iopoll_complete);
37506
37507-static void blk_iopoll_softirq(struct softirq_action *h)
37508+static __latent_entropy void blk_iopoll_softirq(void)
37509 {
37510 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37511 int rearm = 0, budget = blk_iopoll_budget;
37512diff --git a/block/blk-map.c b/block/blk-map.c
37513index f890d43..97b0482 100644
37514--- a/block/blk-map.c
37515+++ b/block/blk-map.c
37516@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37517 if (!len || !kbuf)
37518 return -EINVAL;
37519
37520- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37521+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37522 if (do_copy)
37523 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37524 else
37525diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37526index 53b1737..08177d2e 100644
37527--- a/block/blk-softirq.c
37528+++ b/block/blk-softirq.c
37529@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37530 * Softirq action handler - move entries to local list and loop over them
37531 * while passing them to the queue registered handler.
37532 */
37533-static void blk_done_softirq(struct softirq_action *h)
37534+static __latent_entropy void blk_done_softirq(void)
37535 {
37536 struct list_head *cpu_list, local_list;
37537
37538diff --git a/block/bsg.c b/block/bsg.c
37539index ff46add..c4ba8ee 100644
37540--- a/block/bsg.c
37541+++ b/block/bsg.c
37542@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37543 struct sg_io_v4 *hdr, struct bsg_device *bd,
37544 fmode_t has_write_perm)
37545 {
37546+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37547+ unsigned char *cmdptr;
37548+
37549 if (hdr->request_len > BLK_MAX_CDB) {
37550 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37551 if (!rq->cmd)
37552 return -ENOMEM;
37553- }
37554+ cmdptr = rq->cmd;
37555+ } else
37556+ cmdptr = tmpcmd;
37557
37558- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37559+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37560 hdr->request_len))
37561 return -EFAULT;
37562
37563+ if (cmdptr != rq->cmd)
37564+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37565+
37566 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37567 if (blk_verify_command(rq->cmd, has_write_perm))
37568 return -EPERM;
37569diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37570index 18b282c..050dbe5 100644
37571--- a/block/compat_ioctl.c
37572+++ b/block/compat_ioctl.c
37573@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37574 cgc = compat_alloc_user_space(sizeof(*cgc));
37575 cgc32 = compat_ptr(arg);
37576
37577- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37578+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37579 get_user(data, &cgc32->buffer) ||
37580 put_user(compat_ptr(data), &cgc->buffer) ||
37581 copy_in_user(&cgc->buflen, &cgc32->buflen,
37582@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37583 err |= __get_user(f->spec1, &uf->spec1);
37584 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37585 err |= __get_user(name, &uf->name);
37586- f->name = compat_ptr(name);
37587+ f->name = (void __force_kernel *)compat_ptr(name);
37588 if (err) {
37589 err = -EFAULT;
37590 goto out;
37591diff --git a/block/genhd.c b/block/genhd.c
37592index e6723bd..703e4ac 100644
37593--- a/block/genhd.c
37594+++ b/block/genhd.c
37595@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37596
37597 /*
37598 * Register device numbers dev..(dev+range-1)
37599- * range must be nonzero
37600+ * Noop if @range is zero.
37601 * The hash chain is sorted on range, so that subranges can override.
37602 */
37603 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37604 struct kobject *(*probe)(dev_t, int *, void *),
37605 int (*lock)(dev_t, void *), void *data)
37606 {
37607- kobj_map(bdev_map, devt, range, module, probe, lock, data);
37608+ if (range)
37609+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
37610 }
37611
37612 EXPORT_SYMBOL(blk_register_region);
37613
37614+/* undo blk_register_region(), noop if @range is zero */
37615 void blk_unregister_region(dev_t devt, unsigned long range)
37616 {
37617- kobj_unmap(bdev_map, devt, range);
37618+ if (range)
37619+ kobj_unmap(bdev_map, devt, range);
37620 }
37621
37622 EXPORT_SYMBOL(blk_unregister_region);
37623diff --git a/block/partitions/efi.c b/block/partitions/efi.c
37624index 56d08fd..2e07090 100644
37625--- a/block/partitions/efi.c
37626+++ b/block/partitions/efi.c
37627@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
37628 if (!gpt)
37629 return NULL;
37630
37631+ if (!le32_to_cpu(gpt->num_partition_entries))
37632+ return NULL;
37633+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
37634+ if (!pte)
37635+ return NULL;
37636+
37637 count = le32_to_cpu(gpt->num_partition_entries) *
37638 le32_to_cpu(gpt->sizeof_partition_entry);
37639- if (!count)
37640- return NULL;
37641- pte = kmalloc(count, GFP_KERNEL);
37642- if (!pte)
37643- return NULL;
37644-
37645 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
37646 (u8 *) pte, count) < count) {
37647 kfree(pte);
37648diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
37649index 9b8eaec..c20279a 100644
37650--- a/block/scsi_ioctl.c
37651+++ b/block/scsi_ioctl.c
37652@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
37653 return put_user(0, p);
37654 }
37655
37656-static int sg_get_timeout(struct request_queue *q)
37657+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
37658 {
37659 return jiffies_to_clock_t(q->sg_timeout);
37660 }
37661@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
37662 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
37663 struct sg_io_hdr *hdr, fmode_t mode)
37664 {
37665- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
37666+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37667+ unsigned char *cmdptr;
37668+
37669+ if (rq->cmd != rq->__cmd)
37670+ cmdptr = rq->cmd;
37671+ else
37672+ cmdptr = tmpcmd;
37673+
37674+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
37675 return -EFAULT;
37676+
37677+ if (cmdptr != rq->cmd)
37678+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
37679+
37680 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
37681 return -EPERM;
37682
37683@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37684 int err;
37685 unsigned int in_len, out_len, bytes, opcode, cmdlen;
37686 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
37687+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37688+ unsigned char *cmdptr;
37689
37690 if (!sic)
37691 return -EINVAL;
37692@@ -470,9 +484,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37693 */
37694 err = -EFAULT;
37695 rq->cmd_len = cmdlen;
37696- if (copy_from_user(rq->cmd, sic->data, cmdlen))
37697+
37698+ if (rq->cmd != rq->__cmd)
37699+ cmdptr = rq->cmd;
37700+ else
37701+ cmdptr = tmpcmd;
37702+
37703+ if (copy_from_user(cmdptr, sic->data, cmdlen))
37704 goto error;
37705
37706+ if (rq->cmd != cmdptr)
37707+ memcpy(rq->cmd, cmdptr, cmdlen);
37708+
37709 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
37710 goto error;
37711
37712diff --git a/crypto/cryptd.c b/crypto/cryptd.c
37713index e592c90..c566114 100644
37714--- a/crypto/cryptd.c
37715+++ b/crypto/cryptd.c
37716@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
37717
37718 struct cryptd_blkcipher_request_ctx {
37719 crypto_completion_t complete;
37720-};
37721+} __no_const;
37722
37723 struct cryptd_hash_ctx {
37724 struct crypto_shash *child;
37725@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
37726
37727 struct cryptd_aead_request_ctx {
37728 crypto_completion_t complete;
37729-};
37730+} __no_const;
37731
37732 static void cryptd_queue_worker(struct work_struct *work);
37733
37734diff --git a/crypto/cts.c b/crypto/cts.c
37735index 042223f..133f087 100644
37736--- a/crypto/cts.c
37737+++ b/crypto/cts.c
37738@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
37739 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
37740 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
37741 /* 6. Decrypt En to create Pn-1 */
37742- memset(iv, 0, sizeof(iv));
37743+ memzero_explicit(iv, sizeof(iv));
37744+
37745 sg_set_buf(&sgsrc[0], s + bsize, bsize);
37746 sg_set_buf(&sgdst[0], d, bsize);
37747 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
37748diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
37749index 309d345..1632720 100644
37750--- a/crypto/pcrypt.c
37751+++ b/crypto/pcrypt.c
37752@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
37753 int ret;
37754
37755 pinst->kobj.kset = pcrypt_kset;
37756- ret = kobject_add(&pinst->kobj, NULL, name);
37757+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
37758 if (!ret)
37759 kobject_uevent(&pinst->kobj, KOBJ_ADD);
37760
37761diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
37762index 4279480..7bb0474 100644
37763--- a/crypto/sha1_generic.c
37764+++ b/crypto/sha1_generic.c
37765@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
37766 src = data + done;
37767 } while (done + SHA1_BLOCK_SIZE <= len);
37768
37769- memset(temp, 0, sizeof(temp));
37770+ memzero_explicit(temp, sizeof(temp));
37771 partial = 0;
37772 }
37773 memcpy(sctx->buffer + partial, src, len - done);
37774diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
37775index 5433667..32c5e5e 100644
37776--- a/crypto/sha256_generic.c
37777+++ b/crypto/sha256_generic.c
37778@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
37779
37780 /* clear any sensitive info... */
37781 a = b = c = d = e = f = g = h = t1 = t2 = 0;
37782- memset(W, 0, 64 * sizeof(u32));
37783+ memzero_explicit(W, 64 * sizeof(u32));
37784 }
37785
37786-
37787 static int sha224_init(struct shash_desc *desc)
37788 {
37789 struct sha256_state *sctx = shash_desc_ctx(desc);
37790@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
37791 sha256_final(desc, D);
37792
37793 memcpy(hash, D, SHA224_DIGEST_SIZE);
37794- memset(D, 0, SHA256_DIGEST_SIZE);
37795+ memzero_explicit(D, SHA256_DIGEST_SIZE);
37796
37797 return 0;
37798 }
37799diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
37800index 6ed124f..04d295a 100644
37801--- a/crypto/sha512_generic.c
37802+++ b/crypto/sha512_generic.c
37803@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
37804 sha512_final(desc, D);
37805
37806 memcpy(hash, D, 48);
37807- memset(D, 0, 64);
37808+ memzero_explicit(D, 64);
37809
37810 return 0;
37811 }
37812diff --git a/crypto/tgr192.c b/crypto/tgr192.c
37813index 8740355..3c7af0d 100644
37814--- a/crypto/tgr192.c
37815+++ b/crypto/tgr192.c
37816@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
37817
37818 tgr192_final(desc, D);
37819 memcpy(out, D, TGR160_DIGEST_SIZE);
37820- memset(D, 0, TGR192_DIGEST_SIZE);
37821+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37822
37823 return 0;
37824 }
37825@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
37826
37827 tgr192_final(desc, D);
37828 memcpy(out, D, TGR128_DIGEST_SIZE);
37829- memset(D, 0, TGR192_DIGEST_SIZE);
37830+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37831
37832 return 0;
37833 }
37834diff --git a/crypto/vmac.c b/crypto/vmac.c
37835index 2eb11a3..d84c24b 100644
37836--- a/crypto/vmac.c
37837+++ b/crypto/vmac.c
37838@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
37839 }
37840 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
37841 memcpy(out, &mac, sizeof(vmac_t));
37842- memset(&mac, 0, sizeof(vmac_t));
37843+ memzero_explicit(&mac, sizeof(vmac_t));
37844 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
37845 ctx->partial_size = 0;
37846 return 0;
37847diff --git a/crypto/wp512.c b/crypto/wp512.c
37848index 180f1d6..ec64e77 100644
37849--- a/crypto/wp512.c
37850+++ b/crypto/wp512.c
37851@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
37852 u8 D[64];
37853
37854 wp512_final(desc, D);
37855- memcpy (out, D, WP384_DIGEST_SIZE);
37856- memset (D, 0, WP512_DIGEST_SIZE);
37857+ memcpy(out, D, WP384_DIGEST_SIZE);
37858+ memzero_explicit(D, WP512_DIGEST_SIZE);
37859
37860 return 0;
37861 }
37862@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
37863 u8 D[64];
37864
37865 wp512_final(desc, D);
37866- memcpy (out, D, WP256_DIGEST_SIZE);
37867- memset (D, 0, WP512_DIGEST_SIZE);
37868+ memcpy(out, D, WP256_DIGEST_SIZE);
37869+ memzero_explicit(D, WP512_DIGEST_SIZE);
37870
37871 return 0;
37872 }
37873diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
37874index 6921c7f..78e1af7 100644
37875--- a/drivers/acpi/acpica/hwxfsleep.c
37876+++ b/drivers/acpi/acpica/hwxfsleep.c
37877@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
37878 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
37879
37880 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
37881- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37882- acpi_hw_extended_sleep},
37883- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37884- acpi_hw_extended_wake_prep},
37885- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
37886+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37887+ .extended_function = acpi_hw_extended_sleep},
37888+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37889+ .extended_function = acpi_hw_extended_wake_prep},
37890+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
37891+ .extended_function = acpi_hw_extended_wake}
37892 };
37893
37894 /*
37895diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
37896index 16129c7..8b675cd 100644
37897--- a/drivers/acpi/apei/apei-internal.h
37898+++ b/drivers/acpi/apei/apei-internal.h
37899@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
37900 struct apei_exec_ins_type {
37901 u32 flags;
37902 apei_exec_ins_func_t run;
37903-};
37904+} __do_const;
37905
37906 struct apei_exec_context {
37907 u32 ip;
37908diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
37909index fc5f780..e5ac91a 100644
37910--- a/drivers/acpi/apei/ghes.c
37911+++ b/drivers/acpi/apei/ghes.c
37912@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
37913 const struct acpi_hest_generic *generic,
37914 const struct acpi_hest_generic_status *estatus)
37915 {
37916- static atomic_t seqno;
37917+ static atomic_unchecked_t seqno;
37918 unsigned int curr_seqno;
37919 char pfx_seq[64];
37920
37921@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
37922 else
37923 pfx = KERN_ERR;
37924 }
37925- curr_seqno = atomic_inc_return(&seqno);
37926+ curr_seqno = atomic_inc_return_unchecked(&seqno);
37927 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
37928 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
37929 pfx_seq, generic->header.source_id);
37930diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
37931index a83e3c6..c3d617f 100644
37932--- a/drivers/acpi/bgrt.c
37933+++ b/drivers/acpi/bgrt.c
37934@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
37935 if (!bgrt_image)
37936 return -ENODEV;
37937
37938- bin_attr_image.private = bgrt_image;
37939- bin_attr_image.size = bgrt_image_size;
37940+ pax_open_kernel();
37941+ *(void **)&bin_attr_image.private = bgrt_image;
37942+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
37943+ pax_close_kernel();
37944
37945 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
37946 if (!bgrt_kobj)
37947diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
37948index 36eb42e..3b2f47e 100644
37949--- a/drivers/acpi/blacklist.c
37950+++ b/drivers/acpi/blacklist.c
37951@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
37952 u32 is_critical_error;
37953 };
37954
37955-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
37956+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
37957
37958 /*
37959 * POLICY: If *anything* doesn't work, put it on the blacklist.
37960@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
37961 return 0;
37962 }
37963
37964-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
37965+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
37966 {
37967 .callback = dmi_disable_osi_vista,
37968 .ident = "Fujitsu Siemens",
37969diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
37970index c68e724..e863008 100644
37971--- a/drivers/acpi/custom_method.c
37972+++ b/drivers/acpi/custom_method.c
37973@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37974 struct acpi_table_header table;
37975 acpi_status status;
37976
37977+#ifdef CONFIG_GRKERNSEC_KMEM
37978+ return -EPERM;
37979+#endif
37980+
37981 if (!(*ppos)) {
37982 /* parse the table header to get the table length */
37983 if (count <= sizeof(struct acpi_table_header))
37984diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37985index 17f9ec5..d9a455e 100644
37986--- a/drivers/acpi/processor_idle.c
37987+++ b/drivers/acpi/processor_idle.c
37988@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
37989 {
37990 int i, count = CPUIDLE_DRIVER_STATE_START;
37991 struct acpi_processor_cx *cx;
37992- struct cpuidle_state *state;
37993+ cpuidle_state_no_const *state;
37994 struct cpuidle_driver *drv = &acpi_idle_driver;
37995
37996 if (!pr->flags.power_setup_done)
37997diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
37998index 38cb978..352c761 100644
37999--- a/drivers/acpi/sysfs.c
38000+++ b/drivers/acpi/sysfs.c
38001@@ -423,11 +423,11 @@ static u32 num_counters;
38002 static struct attribute **all_attrs;
38003 static u32 acpi_gpe_count;
38004
38005-static struct attribute_group interrupt_stats_attr_group = {
38006+static attribute_group_no_const interrupt_stats_attr_group = {
38007 .name = "interrupts",
38008 };
38009
38010-static struct kobj_attribute *counter_attrs;
38011+static kobj_attribute_no_const *counter_attrs;
38012
38013 static void delete_gpe_attr_array(void)
38014 {
38015diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38016index b784e9d..a69a049 100644
38017--- a/drivers/ata/libahci.c
38018+++ b/drivers/ata/libahci.c
38019@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
38020 }
38021 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38022
38023-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38024+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38025 struct ata_taskfile *tf, int is_cmd, u16 flags,
38026 unsigned long timeout_msec)
38027 {
38028diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38029index 6f67490..f951ead 100644
38030--- a/drivers/ata/libata-core.c
38031+++ b/drivers/ata/libata-core.c
38032@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38033 static void ata_dev_xfermask(struct ata_device *dev);
38034 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38035
38036-atomic_t ata_print_id = ATOMIC_INIT(0);
38037+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38038
38039 struct ata_force_param {
38040 const char *name;
38041@@ -4797,7 +4797,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38042 struct ata_port *ap;
38043 unsigned int tag;
38044
38045- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38046+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38047 ap = qc->ap;
38048
38049 qc->flags = 0;
38050@@ -4813,7 +4813,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38051 struct ata_port *ap;
38052 struct ata_link *link;
38053
38054- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38055+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38056 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38057 ap = qc->ap;
38058 link = qc->dev->link;
38059@@ -5917,6 +5917,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38060 return;
38061
38062 spin_lock(&lock);
38063+ pax_open_kernel();
38064
38065 for (cur = ops->inherits; cur; cur = cur->inherits) {
38066 void **inherit = (void **)cur;
38067@@ -5930,8 +5931,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38068 if (IS_ERR(*pp))
38069 *pp = NULL;
38070
38071- ops->inherits = NULL;
38072+ *(struct ata_port_operations **)&ops->inherits = NULL;
38073
38074+ pax_close_kernel();
38075 spin_unlock(&lock);
38076 }
38077
38078@@ -6127,7 +6129,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38079
38080 /* give ports names and add SCSI hosts */
38081 for (i = 0; i < host->n_ports; i++) {
38082- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38083+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38084 host->ports[i]->local_port_no = i + 1;
38085 }
38086
38087diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38088index 0586f66..1a8f74a 100644
38089--- a/drivers/ata/libata-scsi.c
38090+++ b/drivers/ata/libata-scsi.c
38091@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38092
38093 if (rc)
38094 return rc;
38095- ap->print_id = atomic_inc_return(&ata_print_id);
38096+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38097 return 0;
38098 }
38099 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38100diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38101index 5f4e0cc..ff2c347 100644
38102--- a/drivers/ata/libata.h
38103+++ b/drivers/ata/libata.h
38104@@ -53,7 +53,7 @@ enum {
38105 ATA_DNXFER_QUIET = (1 << 31),
38106 };
38107
38108-extern atomic_t ata_print_id;
38109+extern atomic_unchecked_t ata_print_id;
38110 extern int atapi_passthru16;
38111 extern int libata_fua;
38112 extern int libata_noacpi;
38113diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38114index 4edb1a8..84e1658 100644
38115--- a/drivers/ata/pata_arasan_cf.c
38116+++ b/drivers/ata/pata_arasan_cf.c
38117@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38118 /* Handle platform specific quirks */
38119 if (quirk) {
38120 if (quirk & CF_BROKEN_PIO) {
38121- ap->ops->set_piomode = NULL;
38122+ pax_open_kernel();
38123+ *(void **)&ap->ops->set_piomode = NULL;
38124+ pax_close_kernel();
38125 ap->pio_mask = 0;
38126 }
38127 if (quirk & CF_BROKEN_MWDMA)
38128diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38129index f9b983a..887b9d8 100644
38130--- a/drivers/atm/adummy.c
38131+++ b/drivers/atm/adummy.c
38132@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38133 vcc->pop(vcc, skb);
38134 else
38135 dev_kfree_skb_any(skb);
38136- atomic_inc(&vcc->stats->tx);
38137+ atomic_inc_unchecked(&vcc->stats->tx);
38138
38139 return 0;
38140 }
38141diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38142index f1a9198..f466a4a 100644
38143--- a/drivers/atm/ambassador.c
38144+++ b/drivers/atm/ambassador.c
38145@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38146 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38147
38148 // VC layer stats
38149- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38150+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38151
38152 // free the descriptor
38153 kfree (tx_descr);
38154@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38155 dump_skb ("<<<", vc, skb);
38156
38157 // VC layer stats
38158- atomic_inc(&atm_vcc->stats->rx);
38159+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38160 __net_timestamp(skb);
38161 // end of our responsibility
38162 atm_vcc->push (atm_vcc, skb);
38163@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38164 } else {
38165 PRINTK (KERN_INFO, "dropped over-size frame");
38166 // should we count this?
38167- atomic_inc(&atm_vcc->stats->rx_drop);
38168+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38169 }
38170
38171 } else {
38172@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38173 }
38174
38175 if (check_area (skb->data, skb->len)) {
38176- atomic_inc(&atm_vcc->stats->tx_err);
38177+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38178 return -ENOMEM; // ?
38179 }
38180
38181diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38182index 480fa6f..947067c 100644
38183--- a/drivers/atm/atmtcp.c
38184+++ b/drivers/atm/atmtcp.c
38185@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38186 if (vcc->pop) vcc->pop(vcc,skb);
38187 else dev_kfree_skb(skb);
38188 if (dev_data) return 0;
38189- atomic_inc(&vcc->stats->tx_err);
38190+ atomic_inc_unchecked(&vcc->stats->tx_err);
38191 return -ENOLINK;
38192 }
38193 size = skb->len+sizeof(struct atmtcp_hdr);
38194@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38195 if (!new_skb) {
38196 if (vcc->pop) vcc->pop(vcc,skb);
38197 else dev_kfree_skb(skb);
38198- atomic_inc(&vcc->stats->tx_err);
38199+ atomic_inc_unchecked(&vcc->stats->tx_err);
38200 return -ENOBUFS;
38201 }
38202 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38203@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38204 if (vcc->pop) vcc->pop(vcc,skb);
38205 else dev_kfree_skb(skb);
38206 out_vcc->push(out_vcc,new_skb);
38207- atomic_inc(&vcc->stats->tx);
38208- atomic_inc(&out_vcc->stats->rx);
38209+ atomic_inc_unchecked(&vcc->stats->tx);
38210+ atomic_inc_unchecked(&out_vcc->stats->rx);
38211 return 0;
38212 }
38213
38214@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38215 read_unlock(&vcc_sklist_lock);
38216 if (!out_vcc) {
38217 result = -EUNATCH;
38218- atomic_inc(&vcc->stats->tx_err);
38219+ atomic_inc_unchecked(&vcc->stats->tx_err);
38220 goto done;
38221 }
38222 skb_pull(skb,sizeof(struct atmtcp_hdr));
38223@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38224 __net_timestamp(new_skb);
38225 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38226 out_vcc->push(out_vcc,new_skb);
38227- atomic_inc(&vcc->stats->tx);
38228- atomic_inc(&out_vcc->stats->rx);
38229+ atomic_inc_unchecked(&vcc->stats->tx);
38230+ atomic_inc_unchecked(&out_vcc->stats->rx);
38231 done:
38232 if (vcc->pop) vcc->pop(vcc,skb);
38233 else dev_kfree_skb(skb);
38234diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38235index d65975a..0b87e20 100644
38236--- a/drivers/atm/eni.c
38237+++ b/drivers/atm/eni.c
38238@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38239 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38240 vcc->dev->number);
38241 length = 0;
38242- atomic_inc(&vcc->stats->rx_err);
38243+ atomic_inc_unchecked(&vcc->stats->rx_err);
38244 }
38245 else {
38246 length = ATM_CELL_SIZE-1; /* no HEC */
38247@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38248 size);
38249 }
38250 eff = length = 0;
38251- atomic_inc(&vcc->stats->rx_err);
38252+ atomic_inc_unchecked(&vcc->stats->rx_err);
38253 }
38254 else {
38255 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38256@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38257 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38258 vcc->dev->number,vcc->vci,length,size << 2,descr);
38259 length = eff = 0;
38260- atomic_inc(&vcc->stats->rx_err);
38261+ atomic_inc_unchecked(&vcc->stats->rx_err);
38262 }
38263 }
38264 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38265@@ -767,7 +767,7 @@ rx_dequeued++;
38266 vcc->push(vcc,skb);
38267 pushed++;
38268 }
38269- atomic_inc(&vcc->stats->rx);
38270+ atomic_inc_unchecked(&vcc->stats->rx);
38271 }
38272 wake_up(&eni_dev->rx_wait);
38273 }
38274@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38275 PCI_DMA_TODEVICE);
38276 if (vcc->pop) vcc->pop(vcc,skb);
38277 else dev_kfree_skb_irq(skb);
38278- atomic_inc(&vcc->stats->tx);
38279+ atomic_inc_unchecked(&vcc->stats->tx);
38280 wake_up(&eni_dev->tx_wait);
38281 dma_complete++;
38282 }
38283diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38284index 82f2ae0..f205c02 100644
38285--- a/drivers/atm/firestream.c
38286+++ b/drivers/atm/firestream.c
38287@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38288 }
38289 }
38290
38291- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38292+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38293
38294 fs_dprintk (FS_DEBUG_TXMEM, "i");
38295 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38296@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38297 #endif
38298 skb_put (skb, qe->p1 & 0xffff);
38299 ATM_SKB(skb)->vcc = atm_vcc;
38300- atomic_inc(&atm_vcc->stats->rx);
38301+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38302 __net_timestamp(skb);
38303 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38304 atm_vcc->push (atm_vcc, skb);
38305@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38306 kfree (pe);
38307 }
38308 if (atm_vcc)
38309- atomic_inc(&atm_vcc->stats->rx_drop);
38310+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38311 break;
38312 case 0x1f: /* Reassembly abort: no buffers. */
38313 /* Silently increment error counter. */
38314 if (atm_vcc)
38315- atomic_inc(&atm_vcc->stats->rx_drop);
38316+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38317 break;
38318 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38319 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38320diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38321index d4725fc..2d4ea65 100644
38322--- a/drivers/atm/fore200e.c
38323+++ b/drivers/atm/fore200e.c
38324@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38325 #endif
38326 /* check error condition */
38327 if (*entry->status & STATUS_ERROR)
38328- atomic_inc(&vcc->stats->tx_err);
38329+ atomic_inc_unchecked(&vcc->stats->tx_err);
38330 else
38331- atomic_inc(&vcc->stats->tx);
38332+ atomic_inc_unchecked(&vcc->stats->tx);
38333 }
38334 }
38335
38336@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38337 if (skb == NULL) {
38338 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38339
38340- atomic_inc(&vcc->stats->rx_drop);
38341+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38342 return -ENOMEM;
38343 }
38344
38345@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38346
38347 dev_kfree_skb_any(skb);
38348
38349- atomic_inc(&vcc->stats->rx_drop);
38350+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38351 return -ENOMEM;
38352 }
38353
38354 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38355
38356 vcc->push(vcc, skb);
38357- atomic_inc(&vcc->stats->rx);
38358+ atomic_inc_unchecked(&vcc->stats->rx);
38359
38360 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38361
38362@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38363 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38364 fore200e->atm_dev->number,
38365 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38366- atomic_inc(&vcc->stats->rx_err);
38367+ atomic_inc_unchecked(&vcc->stats->rx_err);
38368 }
38369 }
38370
38371@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38372 goto retry_here;
38373 }
38374
38375- atomic_inc(&vcc->stats->tx_err);
38376+ atomic_inc_unchecked(&vcc->stats->tx_err);
38377
38378 fore200e->tx_sat++;
38379 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38380diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38381index c39702b..785b73b 100644
38382--- a/drivers/atm/he.c
38383+++ b/drivers/atm/he.c
38384@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38385
38386 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38387 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38388- atomic_inc(&vcc->stats->rx_drop);
38389+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38390 goto return_host_buffers;
38391 }
38392
38393@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38394 RBRQ_LEN_ERR(he_dev->rbrq_head)
38395 ? "LEN_ERR" : "",
38396 vcc->vpi, vcc->vci);
38397- atomic_inc(&vcc->stats->rx_err);
38398+ atomic_inc_unchecked(&vcc->stats->rx_err);
38399 goto return_host_buffers;
38400 }
38401
38402@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38403 vcc->push(vcc, skb);
38404 spin_lock(&he_dev->global_lock);
38405
38406- atomic_inc(&vcc->stats->rx);
38407+ atomic_inc_unchecked(&vcc->stats->rx);
38408
38409 return_host_buffers:
38410 ++pdus_assembled;
38411@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38412 tpd->vcc->pop(tpd->vcc, tpd->skb);
38413 else
38414 dev_kfree_skb_any(tpd->skb);
38415- atomic_inc(&tpd->vcc->stats->tx_err);
38416+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38417 }
38418 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38419 return;
38420@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38421 vcc->pop(vcc, skb);
38422 else
38423 dev_kfree_skb_any(skb);
38424- atomic_inc(&vcc->stats->tx_err);
38425+ atomic_inc_unchecked(&vcc->stats->tx_err);
38426 return -EINVAL;
38427 }
38428
38429@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38430 vcc->pop(vcc, skb);
38431 else
38432 dev_kfree_skb_any(skb);
38433- atomic_inc(&vcc->stats->tx_err);
38434+ atomic_inc_unchecked(&vcc->stats->tx_err);
38435 return -EINVAL;
38436 }
38437 #endif
38438@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38439 vcc->pop(vcc, skb);
38440 else
38441 dev_kfree_skb_any(skb);
38442- atomic_inc(&vcc->stats->tx_err);
38443+ atomic_inc_unchecked(&vcc->stats->tx_err);
38444 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38445 return -ENOMEM;
38446 }
38447@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38448 vcc->pop(vcc, skb);
38449 else
38450 dev_kfree_skb_any(skb);
38451- atomic_inc(&vcc->stats->tx_err);
38452+ atomic_inc_unchecked(&vcc->stats->tx_err);
38453 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38454 return -ENOMEM;
38455 }
38456@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38457 __enqueue_tpd(he_dev, tpd, cid);
38458 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38459
38460- atomic_inc(&vcc->stats->tx);
38461+ atomic_inc_unchecked(&vcc->stats->tx);
38462
38463 return 0;
38464 }
38465diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38466index 1dc0519..1aadaf7 100644
38467--- a/drivers/atm/horizon.c
38468+++ b/drivers/atm/horizon.c
38469@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38470 {
38471 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38472 // VC layer stats
38473- atomic_inc(&vcc->stats->rx);
38474+ atomic_inc_unchecked(&vcc->stats->rx);
38475 __net_timestamp(skb);
38476 // end of our responsibility
38477 vcc->push (vcc, skb);
38478@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38479 dev->tx_iovec = NULL;
38480
38481 // VC layer stats
38482- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38483+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38484
38485 // free the skb
38486 hrz_kfree_skb (skb);
38487diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38488index 2b24ed0..b3d6acc 100644
38489--- a/drivers/atm/idt77252.c
38490+++ b/drivers/atm/idt77252.c
38491@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38492 else
38493 dev_kfree_skb(skb);
38494
38495- atomic_inc(&vcc->stats->tx);
38496+ atomic_inc_unchecked(&vcc->stats->tx);
38497 }
38498
38499 atomic_dec(&scq->used);
38500@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38501 if ((sb = dev_alloc_skb(64)) == NULL) {
38502 printk("%s: Can't allocate buffers for aal0.\n",
38503 card->name);
38504- atomic_add(i, &vcc->stats->rx_drop);
38505+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38506 break;
38507 }
38508 if (!atm_charge(vcc, sb->truesize)) {
38509 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38510 card->name);
38511- atomic_add(i - 1, &vcc->stats->rx_drop);
38512+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38513 dev_kfree_skb(sb);
38514 break;
38515 }
38516@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38517 ATM_SKB(sb)->vcc = vcc;
38518 __net_timestamp(sb);
38519 vcc->push(vcc, sb);
38520- atomic_inc(&vcc->stats->rx);
38521+ atomic_inc_unchecked(&vcc->stats->rx);
38522
38523 cell += ATM_CELL_PAYLOAD;
38524 }
38525@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38526 "(CDC: %08x)\n",
38527 card->name, len, rpp->len, readl(SAR_REG_CDC));
38528 recycle_rx_pool_skb(card, rpp);
38529- atomic_inc(&vcc->stats->rx_err);
38530+ atomic_inc_unchecked(&vcc->stats->rx_err);
38531 return;
38532 }
38533 if (stat & SAR_RSQE_CRC) {
38534 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38535 recycle_rx_pool_skb(card, rpp);
38536- atomic_inc(&vcc->stats->rx_err);
38537+ atomic_inc_unchecked(&vcc->stats->rx_err);
38538 return;
38539 }
38540 if (skb_queue_len(&rpp->queue) > 1) {
38541@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38542 RXPRINTK("%s: Can't alloc RX skb.\n",
38543 card->name);
38544 recycle_rx_pool_skb(card, rpp);
38545- atomic_inc(&vcc->stats->rx_err);
38546+ atomic_inc_unchecked(&vcc->stats->rx_err);
38547 return;
38548 }
38549 if (!atm_charge(vcc, skb->truesize)) {
38550@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38551 __net_timestamp(skb);
38552
38553 vcc->push(vcc, skb);
38554- atomic_inc(&vcc->stats->rx);
38555+ atomic_inc_unchecked(&vcc->stats->rx);
38556
38557 return;
38558 }
38559@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38560 __net_timestamp(skb);
38561
38562 vcc->push(vcc, skb);
38563- atomic_inc(&vcc->stats->rx);
38564+ atomic_inc_unchecked(&vcc->stats->rx);
38565
38566 if (skb->truesize > SAR_FB_SIZE_3)
38567 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38568@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38569 if (vcc->qos.aal != ATM_AAL0) {
38570 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38571 card->name, vpi, vci);
38572- atomic_inc(&vcc->stats->rx_drop);
38573+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38574 goto drop;
38575 }
38576
38577 if ((sb = dev_alloc_skb(64)) == NULL) {
38578 printk("%s: Can't allocate buffers for AAL0.\n",
38579 card->name);
38580- atomic_inc(&vcc->stats->rx_err);
38581+ atomic_inc_unchecked(&vcc->stats->rx_err);
38582 goto drop;
38583 }
38584
38585@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38586 ATM_SKB(sb)->vcc = vcc;
38587 __net_timestamp(sb);
38588 vcc->push(vcc, sb);
38589- atomic_inc(&vcc->stats->rx);
38590+ atomic_inc_unchecked(&vcc->stats->rx);
38591
38592 drop:
38593 skb_pull(queue, 64);
38594@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38595
38596 if (vc == NULL) {
38597 printk("%s: NULL connection in send().\n", card->name);
38598- atomic_inc(&vcc->stats->tx_err);
38599+ atomic_inc_unchecked(&vcc->stats->tx_err);
38600 dev_kfree_skb(skb);
38601 return -EINVAL;
38602 }
38603 if (!test_bit(VCF_TX, &vc->flags)) {
38604 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38605- atomic_inc(&vcc->stats->tx_err);
38606+ atomic_inc_unchecked(&vcc->stats->tx_err);
38607 dev_kfree_skb(skb);
38608 return -EINVAL;
38609 }
38610@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38611 break;
38612 default:
38613 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38614- atomic_inc(&vcc->stats->tx_err);
38615+ atomic_inc_unchecked(&vcc->stats->tx_err);
38616 dev_kfree_skb(skb);
38617 return -EINVAL;
38618 }
38619
38620 if (skb_shinfo(skb)->nr_frags != 0) {
38621 printk("%s: No scatter-gather yet.\n", card->name);
38622- atomic_inc(&vcc->stats->tx_err);
38623+ atomic_inc_unchecked(&vcc->stats->tx_err);
38624 dev_kfree_skb(skb);
38625 return -EINVAL;
38626 }
38627@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38628
38629 err = queue_skb(card, vc, skb, oam);
38630 if (err) {
38631- atomic_inc(&vcc->stats->tx_err);
38632+ atomic_inc_unchecked(&vcc->stats->tx_err);
38633 dev_kfree_skb(skb);
38634 return err;
38635 }
38636@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38637 skb = dev_alloc_skb(64);
38638 if (!skb) {
38639 printk("%s: Out of memory in send_oam().\n", card->name);
38640- atomic_inc(&vcc->stats->tx_err);
38641+ atomic_inc_unchecked(&vcc->stats->tx_err);
38642 return -ENOMEM;
38643 }
38644 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
38645diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38646index 4217f29..88f547a 100644
38647--- a/drivers/atm/iphase.c
38648+++ b/drivers/atm/iphase.c
38649@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38650 status = (u_short) (buf_desc_ptr->desc_mode);
38651 if (status & (RX_CER | RX_PTE | RX_OFL))
38652 {
38653- atomic_inc(&vcc->stats->rx_err);
38654+ atomic_inc_unchecked(&vcc->stats->rx_err);
38655 IF_ERR(printk("IA: bad packet, dropping it");)
38656 if (status & RX_CER) {
38657 IF_ERR(printk(" cause: packet CRC error\n");)
38658@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38659 len = dma_addr - buf_addr;
38660 if (len > iadev->rx_buf_sz) {
38661 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38662- atomic_inc(&vcc->stats->rx_err);
38663+ atomic_inc_unchecked(&vcc->stats->rx_err);
38664 goto out_free_desc;
38665 }
38666
38667@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38668 ia_vcc = INPH_IA_VCC(vcc);
38669 if (ia_vcc == NULL)
38670 {
38671- atomic_inc(&vcc->stats->rx_err);
38672+ atomic_inc_unchecked(&vcc->stats->rx_err);
38673 atm_return(vcc, skb->truesize);
38674 dev_kfree_skb_any(skb);
38675 goto INCR_DLE;
38676@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38677 if ((length > iadev->rx_buf_sz) || (length >
38678 (skb->len - sizeof(struct cpcs_trailer))))
38679 {
38680- atomic_inc(&vcc->stats->rx_err);
38681+ atomic_inc_unchecked(&vcc->stats->rx_err);
38682 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38683 length, skb->len);)
38684 atm_return(vcc, skb->truesize);
38685@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38686
38687 IF_RX(printk("rx_dle_intr: skb push");)
38688 vcc->push(vcc,skb);
38689- atomic_inc(&vcc->stats->rx);
38690+ atomic_inc_unchecked(&vcc->stats->rx);
38691 iadev->rx_pkt_cnt++;
38692 }
38693 INCR_DLE:
38694@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38695 {
38696 struct k_sonet_stats *stats;
38697 stats = &PRIV(_ia_dev[board])->sonet_stats;
38698- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38699- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38700- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38701- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38702- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38703- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38704- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38705- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38706- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38707+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38708+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38709+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38710+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38711+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38712+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38713+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38714+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38715+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38716 }
38717 ia_cmds.status = 0;
38718 break;
38719@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38720 if ((desc == 0) || (desc > iadev->num_tx_desc))
38721 {
38722 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38723- atomic_inc(&vcc->stats->tx);
38724+ atomic_inc_unchecked(&vcc->stats->tx);
38725 if (vcc->pop)
38726 vcc->pop(vcc, skb);
38727 else
38728@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38729 ATM_DESC(skb) = vcc->vci;
38730 skb_queue_tail(&iadev->tx_dma_q, skb);
38731
38732- atomic_inc(&vcc->stats->tx);
38733+ atomic_inc_unchecked(&vcc->stats->tx);
38734 iadev->tx_pkt_cnt++;
38735 /* Increment transaction counter */
38736 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38737
38738 #if 0
38739 /* add flow control logic */
38740- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38741+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38742 if (iavcc->vc_desc_cnt > 10) {
38743 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38744 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38745diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38746index fa7d7019..1e404c7 100644
38747--- a/drivers/atm/lanai.c
38748+++ b/drivers/atm/lanai.c
38749@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38750 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38751 lanai_endtx(lanai, lvcc);
38752 lanai_free_skb(lvcc->tx.atmvcc, skb);
38753- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38754+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38755 }
38756
38757 /* Try to fill the buffer - don't call unless there is backlog */
38758@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38759 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38760 __net_timestamp(skb);
38761 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38762- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38763+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38764 out:
38765 lvcc->rx.buf.ptr = end;
38766 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38767@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38768 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38769 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38770 lanai->stats.service_rxnotaal5++;
38771- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38772+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38773 return 0;
38774 }
38775 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38776@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38777 int bytes;
38778 read_unlock(&vcc_sklist_lock);
38779 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38780- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38781+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38782 lvcc->stats.x.aal5.service_trash++;
38783 bytes = (SERVICE_GET_END(s) * 16) -
38784 (((unsigned long) lvcc->rx.buf.ptr) -
38785@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38786 }
38787 if (s & SERVICE_STREAM) {
38788 read_unlock(&vcc_sklist_lock);
38789- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38790+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38791 lvcc->stats.x.aal5.service_stream++;
38792 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38793 "PDU on VCI %d!\n", lanai->number, vci);
38794@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38795 return 0;
38796 }
38797 DPRINTK("got rx crc error on vci %d\n", vci);
38798- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38799+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38800 lvcc->stats.x.aal5.service_rxcrc++;
38801 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
38802 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
38803diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
38804index 9988ac9..7c52585 100644
38805--- a/drivers/atm/nicstar.c
38806+++ b/drivers/atm/nicstar.c
38807@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38808 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
38809 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
38810 card->index);
38811- atomic_inc(&vcc->stats->tx_err);
38812+ atomic_inc_unchecked(&vcc->stats->tx_err);
38813 dev_kfree_skb_any(skb);
38814 return -EINVAL;
38815 }
38816@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38817 if (!vc->tx) {
38818 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
38819 card->index);
38820- atomic_inc(&vcc->stats->tx_err);
38821+ atomic_inc_unchecked(&vcc->stats->tx_err);
38822 dev_kfree_skb_any(skb);
38823 return -EINVAL;
38824 }
38825@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38826 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
38827 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
38828 card->index);
38829- atomic_inc(&vcc->stats->tx_err);
38830+ atomic_inc_unchecked(&vcc->stats->tx_err);
38831 dev_kfree_skb_any(skb);
38832 return -EINVAL;
38833 }
38834
38835 if (skb_shinfo(skb)->nr_frags != 0) {
38836 printk("nicstar%d: No scatter-gather yet.\n", card->index);
38837- atomic_inc(&vcc->stats->tx_err);
38838+ atomic_inc_unchecked(&vcc->stats->tx_err);
38839 dev_kfree_skb_any(skb);
38840 return -EINVAL;
38841 }
38842@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38843 }
38844
38845 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
38846- atomic_inc(&vcc->stats->tx_err);
38847+ atomic_inc_unchecked(&vcc->stats->tx_err);
38848 dev_kfree_skb_any(skb);
38849 return -EIO;
38850 }
38851- atomic_inc(&vcc->stats->tx);
38852+ atomic_inc_unchecked(&vcc->stats->tx);
38853
38854 return 0;
38855 }
38856@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38857 printk
38858 ("nicstar%d: Can't allocate buffers for aal0.\n",
38859 card->index);
38860- atomic_add(i, &vcc->stats->rx_drop);
38861+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38862 break;
38863 }
38864 if (!atm_charge(vcc, sb->truesize)) {
38865 RXPRINTK
38866 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
38867 card->index);
38868- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38869+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38870 dev_kfree_skb_any(sb);
38871 break;
38872 }
38873@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38874 ATM_SKB(sb)->vcc = vcc;
38875 __net_timestamp(sb);
38876 vcc->push(vcc, sb);
38877- atomic_inc(&vcc->stats->rx);
38878+ atomic_inc_unchecked(&vcc->stats->rx);
38879 cell += ATM_CELL_PAYLOAD;
38880 }
38881
38882@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38883 if (iovb == NULL) {
38884 printk("nicstar%d: Out of iovec buffers.\n",
38885 card->index);
38886- atomic_inc(&vcc->stats->rx_drop);
38887+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38888 recycle_rx_buf(card, skb);
38889 return;
38890 }
38891@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38892 small or large buffer itself. */
38893 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
38894 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
38895- atomic_inc(&vcc->stats->rx_err);
38896+ atomic_inc_unchecked(&vcc->stats->rx_err);
38897 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38898 NS_MAX_IOVECS);
38899 NS_PRV_IOVCNT(iovb) = 0;
38900@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38901 ("nicstar%d: Expected a small buffer, and this is not one.\n",
38902 card->index);
38903 which_list(card, skb);
38904- atomic_inc(&vcc->stats->rx_err);
38905+ atomic_inc_unchecked(&vcc->stats->rx_err);
38906 recycle_rx_buf(card, skb);
38907 vc->rx_iov = NULL;
38908 recycle_iov_buf(card, iovb);
38909@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38910 ("nicstar%d: Expected a large buffer, and this is not one.\n",
38911 card->index);
38912 which_list(card, skb);
38913- atomic_inc(&vcc->stats->rx_err);
38914+ atomic_inc_unchecked(&vcc->stats->rx_err);
38915 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38916 NS_PRV_IOVCNT(iovb));
38917 vc->rx_iov = NULL;
38918@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38919 printk(" - PDU size mismatch.\n");
38920 else
38921 printk(".\n");
38922- atomic_inc(&vcc->stats->rx_err);
38923+ atomic_inc_unchecked(&vcc->stats->rx_err);
38924 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38925 NS_PRV_IOVCNT(iovb));
38926 vc->rx_iov = NULL;
38927@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38928 /* skb points to a small buffer */
38929 if (!atm_charge(vcc, skb->truesize)) {
38930 push_rxbufs(card, skb);
38931- atomic_inc(&vcc->stats->rx_drop);
38932+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38933 } else {
38934 skb_put(skb, len);
38935 dequeue_sm_buf(card, skb);
38936@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38937 ATM_SKB(skb)->vcc = vcc;
38938 __net_timestamp(skb);
38939 vcc->push(vcc, skb);
38940- atomic_inc(&vcc->stats->rx);
38941+ atomic_inc_unchecked(&vcc->stats->rx);
38942 }
38943 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
38944 struct sk_buff *sb;
38945@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38946 if (len <= NS_SMBUFSIZE) {
38947 if (!atm_charge(vcc, sb->truesize)) {
38948 push_rxbufs(card, sb);
38949- atomic_inc(&vcc->stats->rx_drop);
38950+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38951 } else {
38952 skb_put(sb, len);
38953 dequeue_sm_buf(card, sb);
38954@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38955 ATM_SKB(sb)->vcc = vcc;
38956 __net_timestamp(sb);
38957 vcc->push(vcc, sb);
38958- atomic_inc(&vcc->stats->rx);
38959+ atomic_inc_unchecked(&vcc->stats->rx);
38960 }
38961
38962 push_rxbufs(card, skb);
38963@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38964
38965 if (!atm_charge(vcc, skb->truesize)) {
38966 push_rxbufs(card, skb);
38967- atomic_inc(&vcc->stats->rx_drop);
38968+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38969 } else {
38970 dequeue_lg_buf(card, skb);
38971 #ifdef NS_USE_DESTRUCTORS
38972@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38973 ATM_SKB(skb)->vcc = vcc;
38974 __net_timestamp(skb);
38975 vcc->push(vcc, skb);
38976- atomic_inc(&vcc->stats->rx);
38977+ atomic_inc_unchecked(&vcc->stats->rx);
38978 }
38979
38980 push_rxbufs(card, sb);
38981@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38982 printk
38983 ("nicstar%d: Out of huge buffers.\n",
38984 card->index);
38985- atomic_inc(&vcc->stats->rx_drop);
38986+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38987 recycle_iovec_rx_bufs(card,
38988 (struct iovec *)
38989 iovb->data,
38990@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38991 card->hbpool.count++;
38992 } else
38993 dev_kfree_skb_any(hb);
38994- atomic_inc(&vcc->stats->rx_drop);
38995+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38996 } else {
38997 /* Copy the small buffer to the huge buffer */
38998 sb = (struct sk_buff *)iov->iov_base;
38999@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39000 #endif /* NS_USE_DESTRUCTORS */
39001 __net_timestamp(hb);
39002 vcc->push(vcc, hb);
39003- atomic_inc(&vcc->stats->rx);
39004+ atomic_inc_unchecked(&vcc->stats->rx);
39005 }
39006 }
39007
39008diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39009index 7652e8d..db45069 100644
39010--- a/drivers/atm/solos-pci.c
39011+++ b/drivers/atm/solos-pci.c
39012@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39013 }
39014 atm_charge(vcc, skb->truesize);
39015 vcc->push(vcc, skb);
39016- atomic_inc(&vcc->stats->rx);
39017+ atomic_inc_unchecked(&vcc->stats->rx);
39018 break;
39019
39020 case PKT_STATUS:
39021@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39022 vcc = SKB_CB(oldskb)->vcc;
39023
39024 if (vcc) {
39025- atomic_inc(&vcc->stats->tx);
39026+ atomic_inc_unchecked(&vcc->stats->tx);
39027 solos_pop(vcc, oldskb);
39028 } else {
39029 dev_kfree_skb_irq(oldskb);
39030diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39031index 0215934..ce9f5b1 100644
39032--- a/drivers/atm/suni.c
39033+++ b/drivers/atm/suni.c
39034@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39035
39036
39037 #define ADD_LIMITED(s,v) \
39038- atomic_add((v),&stats->s); \
39039- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39040+ atomic_add_unchecked((v),&stats->s); \
39041+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39042
39043
39044 static void suni_hz(unsigned long from_timer)
39045diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39046index 5120a96..e2572bd 100644
39047--- a/drivers/atm/uPD98402.c
39048+++ b/drivers/atm/uPD98402.c
39049@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39050 struct sonet_stats tmp;
39051 int error = 0;
39052
39053- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39054+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39055 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39056 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39057 if (zero && !error) {
39058@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39059
39060
39061 #define ADD_LIMITED(s,v) \
39062- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39063- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39064- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39065+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39066+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39067+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39068
39069
39070 static void stat_event(struct atm_dev *dev)
39071@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39072 if (reason & uPD98402_INT_PFM) stat_event(dev);
39073 if (reason & uPD98402_INT_PCO) {
39074 (void) GET(PCOCR); /* clear interrupt cause */
39075- atomic_add(GET(HECCT),
39076+ atomic_add_unchecked(GET(HECCT),
39077 &PRIV(dev)->sonet_stats.uncorr_hcs);
39078 }
39079 if ((reason & uPD98402_INT_RFO) &&
39080@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39081 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39082 uPD98402_INT_LOS),PIMR); /* enable them */
39083 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39084- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39085- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39086- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39087+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39088+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39089+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39090 return 0;
39091 }
39092
39093diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39094index 969c3c2..9b72956 100644
39095--- a/drivers/atm/zatm.c
39096+++ b/drivers/atm/zatm.c
39097@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39098 }
39099 if (!size) {
39100 dev_kfree_skb_irq(skb);
39101- if (vcc) atomic_inc(&vcc->stats->rx_err);
39102+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39103 continue;
39104 }
39105 if (!atm_charge(vcc,skb->truesize)) {
39106@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39107 skb->len = size;
39108 ATM_SKB(skb)->vcc = vcc;
39109 vcc->push(vcc,skb);
39110- atomic_inc(&vcc->stats->rx);
39111+ atomic_inc_unchecked(&vcc->stats->rx);
39112 }
39113 zout(pos & 0xffff,MTA(mbx));
39114 #if 0 /* probably a stupid idea */
39115@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39116 skb_queue_head(&zatm_vcc->backlog,skb);
39117 break;
39118 }
39119- atomic_inc(&vcc->stats->tx);
39120+ atomic_inc_unchecked(&vcc->stats->tx);
39121 wake_up(&zatm_vcc->tx_wait);
39122 }
39123
39124diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39125index 83e910a..b224a73 100644
39126--- a/drivers/base/bus.c
39127+++ b/drivers/base/bus.c
39128@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39129 return -EINVAL;
39130
39131 mutex_lock(&subsys->p->mutex);
39132- list_add_tail(&sif->node, &subsys->p->interfaces);
39133+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39134 if (sif->add_dev) {
39135 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39136 while ((dev = subsys_dev_iter_next(&iter)))
39137@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39138 subsys = sif->subsys;
39139
39140 mutex_lock(&subsys->p->mutex);
39141- list_del_init(&sif->node);
39142+ pax_list_del_init((struct list_head *)&sif->node);
39143 if (sif->remove_dev) {
39144 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39145 while ((dev = subsys_dev_iter_next(&iter)))
39146diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39147index 25798db..15f130e 100644
39148--- a/drivers/base/devtmpfs.c
39149+++ b/drivers/base/devtmpfs.c
39150@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39151 if (!thread)
39152 return 0;
39153
39154- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39155+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39156 if (err)
39157 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39158 else
39159@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39160 *err = sys_unshare(CLONE_NEWNS);
39161 if (*err)
39162 goto out;
39163- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39164+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39165 if (*err)
39166 goto out;
39167- sys_chdir("/.."); /* will traverse into overmounted root */
39168- sys_chroot(".");
39169+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39170+ sys_chroot((char __force_user *)".");
39171 complete(&setup_done);
39172 while (1) {
39173 spin_lock(&req_lock);
39174diff --git a/drivers/base/node.c b/drivers/base/node.c
39175index d51c49c..28908df 100644
39176--- a/drivers/base/node.c
39177+++ b/drivers/base/node.c
39178@@ -623,7 +623,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39179 struct node_attr {
39180 struct device_attribute attr;
39181 enum node_states state;
39182-};
39183+} __do_const;
39184
39185 static ssize_t show_node_state(struct device *dev,
39186 struct device_attribute *attr, char *buf)
39187diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39188index eee55c1..b8c9393 100644
39189--- a/drivers/base/power/domain.c
39190+++ b/drivers/base/power/domain.c
39191@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39192
39193 if (dev->power.subsys_data->domain_data) {
39194 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39195- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39196+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39197 if (clear_td)
39198- gpd_data->td = (struct gpd_timing_data){ 0 };
39199+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39200
39201 if (--gpd_data->refcount == 0) {
39202 dev->power.subsys_data->domain_data = NULL;
39203@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39204 {
39205 struct cpuidle_driver *cpuidle_drv;
39206 struct gpd_cpu_data *cpu_data;
39207- struct cpuidle_state *idle_state;
39208+ cpuidle_state_no_const *idle_state;
39209 int ret = 0;
39210
39211 if (IS_ERR_OR_NULL(genpd) || state < 0)
39212@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39213 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39214 {
39215 struct gpd_cpu_data *cpu_data;
39216- struct cpuidle_state *idle_state;
39217+ cpuidle_state_no_const *idle_state;
39218 int ret = 0;
39219
39220 if (IS_ERR_OR_NULL(genpd))
39221diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39222index 95b181d1..c4f0e19 100644
39223--- a/drivers/base/power/sysfs.c
39224+++ b/drivers/base/power/sysfs.c
39225@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39226 return -EIO;
39227 }
39228 }
39229- return sprintf(buf, p);
39230+ return sprintf(buf, "%s", p);
39231 }
39232
39233 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
39234diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39235index eb1bd2e..2667d3a 100644
39236--- a/drivers/base/power/wakeup.c
39237+++ b/drivers/base/power/wakeup.c
39238@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39239 * They need to be modified together atomically, so it's better to use one
39240 * atomic variable to hold them both.
39241 */
39242-static atomic_t combined_event_count = ATOMIC_INIT(0);
39243+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39244
39245 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39246 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39247
39248 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39249 {
39250- unsigned int comb = atomic_read(&combined_event_count);
39251+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39252
39253 *cnt = (comb >> IN_PROGRESS_BITS);
39254 *inpr = comb & MAX_IN_PROGRESS;
39255@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39256 ws->start_prevent_time = ws->last_time;
39257
39258 /* Increment the counter of events in progress. */
39259- cec = atomic_inc_return(&combined_event_count);
39260+ cec = atomic_inc_return_unchecked(&combined_event_count);
39261
39262 trace_wakeup_source_activate(ws->name, cec);
39263 }
39264@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39265 * Increment the counter of registered wakeup events and decrement the
39266 * couter of wakeup events in progress simultaneously.
39267 */
39268- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39269+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39270 trace_wakeup_source_deactivate(ws->name, cec);
39271
39272 split_counters(&cnt, &inpr);
39273diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39274index dbb8350..4762f4c 100644
39275--- a/drivers/base/syscore.c
39276+++ b/drivers/base/syscore.c
39277@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39278 void register_syscore_ops(struct syscore_ops *ops)
39279 {
39280 mutex_lock(&syscore_ops_lock);
39281- list_add_tail(&ops->node, &syscore_ops_list);
39282+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39283 mutex_unlock(&syscore_ops_lock);
39284 }
39285 EXPORT_SYMBOL_GPL(register_syscore_ops);
39286@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39287 void unregister_syscore_ops(struct syscore_ops *ops)
39288 {
39289 mutex_lock(&syscore_ops_lock);
39290- list_del(&ops->node);
39291+ pax_list_del((struct list_head *)&ops->node);
39292 mutex_unlock(&syscore_ops_lock);
39293 }
39294 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
39295diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39296index ff20f19..018f1da 100644
39297--- a/drivers/block/cciss.c
39298+++ b/drivers/block/cciss.c
39299@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
39300 while (!list_empty(&h->reqQ)) {
39301 c = list_entry(h->reqQ.next, CommandList_struct, list);
39302 /* can't do anything if fifo is full */
39303- if ((h->access.fifo_full(h))) {
39304+ if ((h->access->fifo_full(h))) {
39305 dev_warn(&h->pdev->dev, "fifo full\n");
39306 break;
39307 }
39308@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
39309 h->Qdepth--;
39310
39311 /* Tell the controller execute command */
39312- h->access.submit_command(h, c);
39313+ h->access->submit_command(h, c);
39314
39315 /* Put job onto the completed Q */
39316 addQ(&h->cmpQ, c);
39317@@ -3444,17 +3444,17 @@ startio:
39318
39319 static inline unsigned long get_next_completion(ctlr_info_t *h)
39320 {
39321- return h->access.command_completed(h);
39322+ return h->access->command_completed(h);
39323 }
39324
39325 static inline int interrupt_pending(ctlr_info_t *h)
39326 {
39327- return h->access.intr_pending(h);
39328+ return h->access->intr_pending(h);
39329 }
39330
39331 static inline long interrupt_not_for_us(ctlr_info_t *h)
39332 {
39333- return ((h->access.intr_pending(h) == 0) ||
39334+ return ((h->access->intr_pending(h) == 0) ||
39335 (h->interrupts_enabled == 0));
39336 }
39337
39338@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
39339 u32 a;
39340
39341 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39342- return h->access.command_completed(h);
39343+ return h->access->command_completed(h);
39344
39345 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39346 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39347@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39348 trans_support & CFGTBL_Trans_use_short_tags);
39349
39350 /* Change the access methods to the performant access methods */
39351- h->access = SA5_performant_access;
39352+ h->access = &SA5_performant_access;
39353 h->transMethod = CFGTBL_Trans_Performant;
39354
39355 return;
39356@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39357 if (prod_index < 0)
39358 return -ENODEV;
39359 h->product_name = products[prod_index].product_name;
39360- h->access = *(products[prod_index].access);
39361+ h->access = products[prod_index].access;
39362
39363 if (cciss_board_disabled(h)) {
39364 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39365@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
39366 }
39367
39368 /* make sure the board interrupts are off */
39369- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39370+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39371 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39372 if (rc)
39373 goto clean2;
39374@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
39375 * fake ones to scoop up any residual completions.
39376 */
39377 spin_lock_irqsave(&h->lock, flags);
39378- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39379+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39380 spin_unlock_irqrestore(&h->lock, flags);
39381 free_irq(h->intr[h->intr_mode], h);
39382 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39383@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
39384 dev_info(&h->pdev->dev, "Board READY.\n");
39385 dev_info(&h->pdev->dev,
39386 "Waiting for stale completions to drain.\n");
39387- h->access.set_intr_mask(h, CCISS_INTR_ON);
39388+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39389 msleep(10000);
39390- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39391+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39392
39393 rc = controller_reset_failed(h->cfgtable);
39394 if (rc)
39395@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
39396 cciss_scsi_setup(h);
39397
39398 /* Turn the interrupts on so we can service requests */
39399- h->access.set_intr_mask(h, CCISS_INTR_ON);
39400+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39401
39402 /* Get the firmware version */
39403 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39404@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39405 kfree(flush_buf);
39406 if (return_code != IO_OK)
39407 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39408- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39409+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39410 free_irq(h->intr[h->intr_mode], h);
39411 }
39412
39413diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39414index 7fda30e..2f27946 100644
39415--- a/drivers/block/cciss.h
39416+++ b/drivers/block/cciss.h
39417@@ -101,7 +101,7 @@ struct ctlr_info
39418 /* information about each logical volume */
39419 drive_info_struct *drv[CISS_MAX_LUN];
39420
39421- struct access_method access;
39422+ struct access_method *access;
39423
39424 /* queue and queue Info */
39425 struct list_head reqQ;
39426@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39427 }
39428
39429 static struct access_method SA5_access = {
39430- SA5_submit_command,
39431- SA5_intr_mask,
39432- SA5_fifo_full,
39433- SA5_intr_pending,
39434- SA5_completed,
39435+ .submit_command = SA5_submit_command,
39436+ .set_intr_mask = SA5_intr_mask,
39437+ .fifo_full = SA5_fifo_full,
39438+ .intr_pending = SA5_intr_pending,
39439+ .command_completed = SA5_completed,
39440 };
39441
39442 static struct access_method SA5B_access = {
39443- SA5_submit_command,
39444- SA5B_intr_mask,
39445- SA5_fifo_full,
39446- SA5B_intr_pending,
39447- SA5_completed,
39448+ .submit_command = SA5_submit_command,
39449+ .set_intr_mask = SA5B_intr_mask,
39450+ .fifo_full = SA5_fifo_full,
39451+ .intr_pending = SA5B_intr_pending,
39452+ .command_completed = SA5_completed,
39453 };
39454
39455 static struct access_method SA5_performant_access = {
39456- SA5_submit_command,
39457- SA5_performant_intr_mask,
39458- SA5_fifo_full,
39459- SA5_performant_intr_pending,
39460- SA5_performant_completed,
39461+ .submit_command = SA5_submit_command,
39462+ .set_intr_mask = SA5_performant_intr_mask,
39463+ .fifo_full = SA5_fifo_full,
39464+ .intr_pending = SA5_performant_intr_pending,
39465+ .command_completed = SA5_performant_completed,
39466 };
39467
39468 struct board_type {
39469diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39470index 2b94403..fd6ad1f 100644
39471--- a/drivers/block/cpqarray.c
39472+++ b/drivers/block/cpqarray.c
39473@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39474 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39475 goto Enomem4;
39476 }
39477- hba[i]->access.set_intr_mask(hba[i], 0);
39478+ hba[i]->access->set_intr_mask(hba[i], 0);
39479 if (request_irq(hba[i]->intr, do_ida_intr,
39480 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39481 {
39482@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39483 add_timer(&hba[i]->timer);
39484
39485 /* Enable IRQ now that spinlock and rate limit timer are set up */
39486- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39487+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39488
39489 for(j=0; j<NWD; j++) {
39490 struct gendisk *disk = ida_gendisk[i][j];
39491@@ -694,7 +694,7 @@ DBGINFO(
39492 for(i=0; i<NR_PRODUCTS; i++) {
39493 if (board_id == products[i].board_id) {
39494 c->product_name = products[i].product_name;
39495- c->access = *(products[i].access);
39496+ c->access = products[i].access;
39497 break;
39498 }
39499 }
39500@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39501 hba[ctlr]->intr = intr;
39502 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39503 hba[ctlr]->product_name = products[j].product_name;
39504- hba[ctlr]->access = *(products[j].access);
39505+ hba[ctlr]->access = products[j].access;
39506 hba[ctlr]->ctlr = ctlr;
39507 hba[ctlr]->board_id = board_id;
39508 hba[ctlr]->pci_dev = NULL; /* not PCI */
39509@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39510
39511 while((c = h->reqQ) != NULL) {
39512 /* Can't do anything if we're busy */
39513- if (h->access.fifo_full(h) == 0)
39514+ if (h->access->fifo_full(h) == 0)
39515 return;
39516
39517 /* Get the first entry from the request Q */
39518@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39519 h->Qdepth--;
39520
39521 /* Tell the controller to do our bidding */
39522- h->access.submit_command(h, c);
39523+ h->access->submit_command(h, c);
39524
39525 /* Get onto the completion Q */
39526 addQ(&h->cmpQ, c);
39527@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39528 unsigned long flags;
39529 __u32 a,a1;
39530
39531- istat = h->access.intr_pending(h);
39532+ istat = h->access->intr_pending(h);
39533 /* Is this interrupt for us? */
39534 if (istat == 0)
39535 return IRQ_NONE;
39536@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39537 */
39538 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39539 if (istat & FIFO_NOT_EMPTY) {
39540- while((a = h->access.command_completed(h))) {
39541+ while((a = h->access->command_completed(h))) {
39542 a1 = a; a &= ~3;
39543 if ((c = h->cmpQ) == NULL)
39544 {
39545@@ -1448,11 +1448,11 @@ static int sendcmd(
39546 /*
39547 * Disable interrupt
39548 */
39549- info_p->access.set_intr_mask(info_p, 0);
39550+ info_p->access->set_intr_mask(info_p, 0);
39551 /* Make sure there is room in the command FIFO */
39552 /* Actually it should be completely empty at this time. */
39553 for (i = 200000; i > 0; i--) {
39554- temp = info_p->access.fifo_full(info_p);
39555+ temp = info_p->access->fifo_full(info_p);
39556 if (temp != 0) {
39557 break;
39558 }
39559@@ -1465,7 +1465,7 @@ DBG(
39560 /*
39561 * Send the cmd
39562 */
39563- info_p->access.submit_command(info_p, c);
39564+ info_p->access->submit_command(info_p, c);
39565 complete = pollcomplete(ctlr);
39566
39567 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39568@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39569 * we check the new geometry. Then turn interrupts back on when
39570 * we're done.
39571 */
39572- host->access.set_intr_mask(host, 0);
39573+ host->access->set_intr_mask(host, 0);
39574 getgeometry(ctlr);
39575- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39576+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39577
39578 for(i=0; i<NWD; i++) {
39579 struct gendisk *disk = ida_gendisk[ctlr][i];
39580@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39581 /* Wait (up to 2 seconds) for a command to complete */
39582
39583 for (i = 200000; i > 0; i--) {
39584- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39585+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39586 if (done == 0) {
39587 udelay(10); /* a short fixed delay */
39588 } else
39589diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39590index be73e9d..7fbf140 100644
39591--- a/drivers/block/cpqarray.h
39592+++ b/drivers/block/cpqarray.h
39593@@ -99,7 +99,7 @@ struct ctlr_info {
39594 drv_info_t drv[NWD];
39595 struct proc_dir_entry *proc;
39596
39597- struct access_method access;
39598+ struct access_method *access;
39599
39600 cmdlist_t *reqQ;
39601 cmdlist_t *cmpQ;
39602diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
39603index 426c97a..8c58607 100644
39604--- a/drivers/block/drbd/drbd_bitmap.c
39605+++ b/drivers/block/drbd/drbd_bitmap.c
39606@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
39607 submit_bio(rw, bio);
39608 /* this should not count as user activity and cause the
39609 * resync to throttle -- see drbd_rs_should_slow_down(). */
39610- atomic_add(len >> 9, &device->rs_sect_ev);
39611+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
39612 }
39613 }
39614
39615diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39616index 1a00001..c0d4253 100644
39617--- a/drivers/block/drbd/drbd_int.h
39618+++ b/drivers/block/drbd/drbd_int.h
39619@@ -387,7 +387,7 @@ struct drbd_epoch {
39620 struct drbd_connection *connection;
39621 struct list_head list;
39622 unsigned int barrier_nr;
39623- atomic_t epoch_size; /* increased on every request added. */
39624+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39625 atomic_t active; /* increased on every req. added, and dec on every finished. */
39626 unsigned long flags;
39627 };
39628@@ -948,7 +948,7 @@ struct drbd_device {
39629 unsigned int al_tr_number;
39630 int al_tr_cycle;
39631 wait_queue_head_t seq_wait;
39632- atomic_t packet_seq;
39633+ atomic_unchecked_t packet_seq;
39634 unsigned int peer_seq;
39635 spinlock_t peer_seq_lock;
39636 unsigned long comm_bm_set; /* communicated number of set bits. */
39637@@ -957,8 +957,8 @@ struct drbd_device {
39638 struct mutex own_state_mutex;
39639 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
39640 char congestion_reason; /* Why we where congested... */
39641- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
39642- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
39643+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
39644+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
39645 int rs_last_sect_ev; /* counter to compare with */
39646 int rs_last_events; /* counter of read or write "events" (unit sectors)
39647 * on the lower level device when we last looked. */
39648@@ -1569,7 +1569,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39649 char __user *uoptval;
39650 int err;
39651
39652- uoptval = (char __user __force *)optval;
39653+ uoptval = (char __force_user *)optval;
39654
39655 set_fs(KERNEL_DS);
39656 if (level == SOL_SOCKET)
39657diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39658index 89c497c..9c736ae 100644
39659--- a/drivers/block/drbd/drbd_interval.c
39660+++ b/drivers/block/drbd/drbd_interval.c
39661@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39662 }
39663
39664 static const struct rb_augment_callbacks augment_callbacks = {
39665- augment_propagate,
39666- augment_copy,
39667- augment_rotate,
39668+ .propagate = augment_propagate,
39669+ .copy = augment_copy,
39670+ .rotate = augment_rotate,
39671 };
39672
39673 /**
39674diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39675index 9b465bb..00034ecf 100644
39676--- a/drivers/block/drbd/drbd_main.c
39677+++ b/drivers/block/drbd/drbd_main.c
39678@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39679 p->sector = sector;
39680 p->block_id = block_id;
39681 p->blksize = blksize;
39682- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39683+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39684 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39685 }
39686
39687@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39688 return -EIO;
39689 p->sector = cpu_to_be64(req->i.sector);
39690 p->block_id = (unsigned long)req;
39691- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39692+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39693 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39694 if (device->state.conn >= C_SYNC_SOURCE &&
39695 device->state.conn <= C_PAUSED_SYNC_T)
39696@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
39697 atomic_set(&device->unacked_cnt, 0);
39698 atomic_set(&device->local_cnt, 0);
39699 atomic_set(&device->pp_in_use_by_net, 0);
39700- atomic_set(&device->rs_sect_in, 0);
39701- atomic_set(&device->rs_sect_ev, 0);
39702+ atomic_set_unchecked(&device->rs_sect_in, 0);
39703+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39704 atomic_set(&device->ap_in_flight, 0);
39705 atomic_set(&device->md_io.in_use, 0);
39706
39707@@ -2688,8 +2688,8 @@ void drbd_destroy_connection(struct kref *kref)
39708 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39709 struct drbd_resource *resource = connection->resource;
39710
39711- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39712- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39713+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39714+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39715 kfree(connection->current_epoch);
39716
39717 idr_destroy(&connection->peer_devices);
39718diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39719index 1cd47df..57c53c0 100644
39720--- a/drivers/block/drbd/drbd_nl.c
39721+++ b/drivers/block/drbd/drbd_nl.c
39722@@ -3645,13 +3645,13 @@ finish:
39723
39724 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39725 {
39726- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39727+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39728 struct sk_buff *msg;
39729 struct drbd_genlmsghdr *d_out;
39730 unsigned seq;
39731 int err = -ENOMEM;
39732
39733- seq = atomic_inc_return(&drbd_genl_seq);
39734+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39735 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39736 if (!msg)
39737 goto failed;
39738diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39739index 9342b8d..b6a6825 100644
39740--- a/drivers/block/drbd/drbd_receiver.c
39741+++ b/drivers/block/drbd/drbd_receiver.c
39742@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39743 struct drbd_device *device = peer_device->device;
39744 int err;
39745
39746- atomic_set(&device->packet_seq, 0);
39747+ atomic_set_unchecked(&device->packet_seq, 0);
39748 device->peer_seq = 0;
39749
39750 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39751@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39752 do {
39753 next_epoch = NULL;
39754
39755- epoch_size = atomic_read(&epoch->epoch_size);
39756+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39757
39758 switch (ev & ~EV_CLEANUP) {
39759 case EV_PUT:
39760@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39761 rv = FE_DESTROYED;
39762 } else {
39763 epoch->flags = 0;
39764- atomic_set(&epoch->epoch_size, 0);
39765+ atomic_set_unchecked(&epoch->epoch_size, 0);
39766 /* atomic_set(&epoch->active, 0); is already zero */
39767 if (rv == FE_STILL_LIVE)
39768 rv = FE_RECYCLED;
39769@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39770 conn_wait_active_ee_empty(connection);
39771 drbd_flush(connection);
39772
39773- if (atomic_read(&connection->current_epoch->epoch_size)) {
39774+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39775 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39776 if (epoch)
39777 break;
39778@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39779 }
39780
39781 epoch->flags = 0;
39782- atomic_set(&epoch->epoch_size, 0);
39783+ atomic_set_unchecked(&epoch->epoch_size, 0);
39784 atomic_set(&epoch->active, 0);
39785
39786 spin_lock(&connection->epoch_lock);
39787- if (atomic_read(&connection->current_epoch->epoch_size)) {
39788+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39789 list_add(&epoch->list, &connection->current_epoch->list);
39790 connection->current_epoch = epoch;
39791 connection->epochs++;
39792@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
39793 list_add_tail(&peer_req->w.list, &device->sync_ee);
39794 spin_unlock_irq(&device->resource->req_lock);
39795
39796- atomic_add(pi->size >> 9, &device->rs_sect_ev);
39797+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
39798 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
39799 return 0;
39800
39801@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
39802 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39803 }
39804
39805- atomic_add(pi->size >> 9, &device->rs_sect_in);
39806+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
39807
39808 return err;
39809 }
39810@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39811
39812 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39813 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39814- atomic_inc(&connection->current_epoch->epoch_size);
39815+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39816 err2 = drbd_drain_block(peer_device, pi->size);
39817 if (!err)
39818 err = err2;
39819@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39820
39821 spin_lock(&connection->epoch_lock);
39822 peer_req->epoch = connection->current_epoch;
39823- atomic_inc(&peer_req->epoch->epoch_size);
39824+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39825 atomic_inc(&peer_req->epoch->active);
39826 spin_unlock(&connection->epoch_lock);
39827
39828@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
39829
39830 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
39831 (int)part_stat_read(&disk->part0, sectors[1]) -
39832- atomic_read(&device->rs_sect_ev);
39833+ atomic_read_unchecked(&device->rs_sect_ev);
39834
39835 if (atomic_read(&device->ap_actlog_cnt)
39836 || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
39837@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39838 device->use_csums = true;
39839 } else if (pi->cmd == P_OV_REPLY) {
39840 /* track progress, we may need to throttle */
39841- atomic_add(size >> 9, &device->rs_sect_in);
39842+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
39843 peer_req->w.cb = w_e_end_ov_reply;
39844 dec_rs_pending(device);
39845 /* drbd_rs_begin_io done when we sent this request,
39846@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39847 goto out_free_e;
39848
39849 submit_for_resync:
39850- atomic_add(size >> 9, &device->rs_sect_ev);
39851+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39852
39853 submit:
39854 update_receiver_timing_details(connection, drbd_submit_peer_request);
39855@@ -4564,7 +4564,7 @@ struct data_cmd {
39856 int expect_payload;
39857 size_t pkt_size;
39858 int (*fn)(struct drbd_connection *, struct packet_info *);
39859-};
39860+} __do_const;
39861
39862 static struct data_cmd drbd_cmd_handler[] = {
39863 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39864@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39865 if (!list_empty(&connection->current_epoch->list))
39866 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39867 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39868- atomic_set(&connection->current_epoch->epoch_size, 0);
39869+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39870 connection->send.seen_any_write_yet = false;
39871
39872 drbd_info(connection, "Connection closed\n");
39873@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
39874 put_ldev(device);
39875 }
39876 dec_rs_pending(device);
39877- atomic_add(blksize >> 9, &device->rs_sect_in);
39878+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
39879
39880 return 0;
39881 }
39882@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39883 struct asender_cmd {
39884 size_t pkt_size;
39885 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39886-};
39887+} __do_const;
39888
39889 static struct asender_cmd asender_tbl[] = {
39890 [P_PING] = { 0, got_Ping },
39891diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
39892index 50776b3..1477c3f 100644
39893--- a/drivers/block/drbd/drbd_worker.c
39894+++ b/drivers/block/drbd/drbd_worker.c
39895@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
39896 list_add_tail(&peer_req->w.list, &device->read_ee);
39897 spin_unlock_irq(&device->resource->req_lock);
39898
39899- atomic_add(size >> 9, &device->rs_sect_ev);
39900+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39901 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
39902 return 0;
39903
39904@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
39905 unsigned int sect_in; /* Number of sectors that came in since the last turn */
39906 int number, mxb;
39907
39908- sect_in = atomic_xchg(&device->rs_sect_in, 0);
39909+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
39910 device->rs_in_flight -= sect_in;
39911
39912 rcu_read_lock();
39913@@ -1594,8 +1594,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
39914 {
39915 struct fifo_buffer *plan;
39916
39917- atomic_set(&device->rs_sect_in, 0);
39918- atomic_set(&device->rs_sect_ev, 0);
39919+ atomic_set_unchecked(&device->rs_sect_in, 0);
39920+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39921 device->rs_in_flight = 0;
39922
39923 /* Updating the RCU protected object in place is necessary since
39924diff --git a/drivers/block/loop.c b/drivers/block/loop.c
39925index 6cb1beb..bf490f7 100644
39926--- a/drivers/block/loop.c
39927+++ b/drivers/block/loop.c
39928@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
39929
39930 file_start_write(file);
39931 set_fs(get_ds());
39932- bw = file->f_op->write(file, buf, len, &pos);
39933+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
39934 set_fs(old_fs);
39935 file_end_write(file);
39936 if (likely(bw == len))
39937diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
39938index 02351e2..a9ea617 100644
39939--- a/drivers/block/nvme-core.c
39940+++ b/drivers/block/nvme-core.c
39941@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
39942 static struct task_struct *nvme_thread;
39943 static struct workqueue_struct *nvme_workq;
39944 static wait_queue_head_t nvme_kthread_wait;
39945-static struct notifier_block nvme_nb;
39946
39947 static void nvme_reset_failed_dev(struct work_struct *ws);
39948
39949@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
39950 .err_handler = &nvme_err_handler,
39951 };
39952
39953+static struct notifier_block nvme_nb = {
39954+ .notifier_call = &nvme_cpu_notify,
39955+};
39956+
39957 static int __init nvme_init(void)
39958 {
39959 int result;
39960@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
39961 else if (result > 0)
39962 nvme_major = result;
39963
39964- nvme_nb.notifier_call = &nvme_cpu_notify;
39965 result = register_hotcpu_notifier(&nvme_nb);
39966 if (result)
39967 goto unregister_blkdev;
39968diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
39969index 758ac44..58087fd 100644
39970--- a/drivers/block/pktcdvd.c
39971+++ b/drivers/block/pktcdvd.c
39972@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
39973
39974 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
39975 {
39976- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
39977+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
39978 }
39979
39980 /*
39981@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
39982 return -EROFS;
39983 }
39984 pd->settings.fp = ti.fp;
39985- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
39986+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
39987
39988 if (ti.nwa_v) {
39989 pd->nwa = be32_to_cpu(ti.next_writable);
39990diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
39991index e5565fb..71be10b4 100644
39992--- a/drivers/block/smart1,2.h
39993+++ b/drivers/block/smart1,2.h
39994@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
39995 }
39996
39997 static struct access_method smart4_access = {
39998- smart4_submit_command,
39999- smart4_intr_mask,
40000- smart4_fifo_full,
40001- smart4_intr_pending,
40002- smart4_completed,
40003+ .submit_command = smart4_submit_command,
40004+ .set_intr_mask = smart4_intr_mask,
40005+ .fifo_full = smart4_fifo_full,
40006+ .intr_pending = smart4_intr_pending,
40007+ .command_completed = smart4_completed,
40008 };
40009
40010 /*
40011@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40012 }
40013
40014 static struct access_method smart2_access = {
40015- smart2_submit_command,
40016- smart2_intr_mask,
40017- smart2_fifo_full,
40018- smart2_intr_pending,
40019- smart2_completed,
40020+ .submit_command = smart2_submit_command,
40021+ .set_intr_mask = smart2_intr_mask,
40022+ .fifo_full = smart2_fifo_full,
40023+ .intr_pending = smart2_intr_pending,
40024+ .command_completed = smart2_completed,
40025 };
40026
40027 /*
40028@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40029 }
40030
40031 static struct access_method smart2e_access = {
40032- smart2e_submit_command,
40033- smart2e_intr_mask,
40034- smart2e_fifo_full,
40035- smart2e_intr_pending,
40036- smart2e_completed,
40037+ .submit_command = smart2e_submit_command,
40038+ .set_intr_mask = smart2e_intr_mask,
40039+ .fifo_full = smart2e_fifo_full,
40040+ .intr_pending = smart2e_intr_pending,
40041+ .command_completed = smart2e_completed,
40042 };
40043
40044 /*
40045@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40046 }
40047
40048 static struct access_method smart1_access = {
40049- smart1_submit_command,
40050- smart1_intr_mask,
40051- smart1_fifo_full,
40052- smart1_intr_pending,
40053- smart1_completed,
40054+ .submit_command = smart1_submit_command,
40055+ .set_intr_mask = smart1_intr_mask,
40056+ .fifo_full = smart1_fifo_full,
40057+ .intr_pending = smart1_intr_pending,
40058+ .command_completed = smart1_completed,
40059 };
40060diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40061index f038dba..bb74c08 100644
40062--- a/drivers/bluetooth/btwilink.c
40063+++ b/drivers/bluetooth/btwilink.c
40064@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40065
40066 static int bt_ti_probe(struct platform_device *pdev)
40067 {
40068- static struct ti_st *hst;
40069+ struct ti_st *hst;
40070 struct hci_dev *hdev;
40071 int err;
40072
40073diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40074index 898b84b..86f74b9 100644
40075--- a/drivers/cdrom/cdrom.c
40076+++ b/drivers/cdrom/cdrom.c
40077@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40078 ENSURE(reset, CDC_RESET);
40079 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40080 cdi->mc_flags = 0;
40081- cdo->n_minors = 0;
40082 cdi->options = CDO_USE_FFLAGS;
40083
40084 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40085@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40086 else
40087 cdi->cdda_method = CDDA_OLD;
40088
40089- if (!cdo->generic_packet)
40090- cdo->generic_packet = cdrom_dummy_generic_packet;
40091+ if (!cdo->generic_packet) {
40092+ pax_open_kernel();
40093+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40094+ pax_close_kernel();
40095+ }
40096
40097 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40098 mutex_lock(&cdrom_mutex);
40099@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40100 if (cdi->exit)
40101 cdi->exit(cdi);
40102
40103- cdi->ops->n_minors--;
40104 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40105 }
40106
40107@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40108 */
40109 nr = nframes;
40110 do {
40111- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40112+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40113 if (cgc.buffer)
40114 break;
40115
40116@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40117 struct cdrom_device_info *cdi;
40118 int ret;
40119
40120- ret = scnprintf(info + *pos, max_size - *pos, header);
40121+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40122 if (!ret)
40123 return 1;
40124
40125diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40126index 584bc31..e64a12c 100644
40127--- a/drivers/cdrom/gdrom.c
40128+++ b/drivers/cdrom/gdrom.c
40129@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40130 .audio_ioctl = gdrom_audio_ioctl,
40131 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40132 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40133- .n_minors = 1,
40134 };
40135
40136 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40137diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40138index 6e9f74a..50c7cea 100644
40139--- a/drivers/char/Kconfig
40140+++ b/drivers/char/Kconfig
40141@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40142
40143 config DEVKMEM
40144 bool "/dev/kmem virtual device support"
40145- default y
40146+ default n
40147+ depends on !GRKERNSEC_KMEM
40148 help
40149 Say Y here if you want to support the /dev/kmem device. The
40150 /dev/kmem device is rarely used, but can be used for certain
40151@@ -577,6 +578,7 @@ config DEVPORT
40152 bool
40153 depends on !M68K
40154 depends on ISA || PCI
40155+ depends on !GRKERNSEC_KMEM
40156 default y
40157
40158 source "drivers/s390/char/Kconfig"
40159diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40160index a48e05b..6bac831 100644
40161--- a/drivers/char/agp/compat_ioctl.c
40162+++ b/drivers/char/agp/compat_ioctl.c
40163@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40164 return -ENOMEM;
40165 }
40166
40167- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40168+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40169 sizeof(*usegment) * ureserve.seg_count)) {
40170 kfree(usegment);
40171 kfree(ksegment);
40172diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40173index 09f17eb..8531d2f 100644
40174--- a/drivers/char/agp/frontend.c
40175+++ b/drivers/char/agp/frontend.c
40176@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40177 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40178 return -EFAULT;
40179
40180- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40181+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40182 return -EFAULT;
40183
40184 client = agp_find_client_by_pid(reserve.pid);
40185@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40186 if (segment == NULL)
40187 return -ENOMEM;
40188
40189- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40190+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40191 sizeof(struct agp_segment) * reserve.seg_count)) {
40192 kfree(segment);
40193 return -EFAULT;
40194diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40195index 4f94375..413694e 100644
40196--- a/drivers/char/genrtc.c
40197+++ b/drivers/char/genrtc.c
40198@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40199 switch (cmd) {
40200
40201 case RTC_PLL_GET:
40202+ memset(&pll, 0, sizeof(pll));
40203 if (get_rtc_pll(&pll))
40204 return -EINVAL;
40205 else
40206diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40207index d5d4cd8..22d561d 100644
40208--- a/drivers/char/hpet.c
40209+++ b/drivers/char/hpet.c
40210@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40211 }
40212
40213 static int
40214-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40215+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40216 struct hpet_info *info)
40217 {
40218 struct hpet_timer __iomem *timer;
40219diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40220index 86fe45c..c0ea948 100644
40221--- a/drivers/char/hw_random/intel-rng.c
40222+++ b/drivers/char/hw_random/intel-rng.c
40223@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40224
40225 if (no_fwh_detect)
40226 return -ENODEV;
40227- printk(warning);
40228+ printk("%s", warning);
40229 return -EBUSY;
40230 }
40231
40232diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40233index e6db938..835e3a2 100644
40234--- a/drivers/char/ipmi/ipmi_msghandler.c
40235+++ b/drivers/char/ipmi/ipmi_msghandler.c
40236@@ -438,7 +438,7 @@ struct ipmi_smi {
40237 struct proc_dir_entry *proc_dir;
40238 char proc_dir_name[10];
40239
40240- atomic_t stats[IPMI_NUM_STATS];
40241+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40242
40243 /*
40244 * run_to_completion duplicate of smb_info, smi_info
40245@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40246 static DEFINE_MUTEX(smi_watchers_mutex);
40247
40248 #define ipmi_inc_stat(intf, stat) \
40249- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40250+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40251 #define ipmi_get_stat(intf, stat) \
40252- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40253+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40254
40255 static int is_lan_addr(struct ipmi_addr *addr)
40256 {
40257@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40258 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40259 init_waitqueue_head(&intf->waitq);
40260 for (i = 0; i < IPMI_NUM_STATS; i++)
40261- atomic_set(&intf->stats[i], 0);
40262+ atomic_set_unchecked(&intf->stats[i], 0);
40263
40264 intf->proc_dir = NULL;
40265
40266diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40267index 5d66568..c9d93c3 100644
40268--- a/drivers/char/ipmi/ipmi_si_intf.c
40269+++ b/drivers/char/ipmi/ipmi_si_intf.c
40270@@ -285,7 +285,7 @@ struct smi_info {
40271 unsigned char slave_addr;
40272
40273 /* Counters and things for the proc filesystem. */
40274- atomic_t stats[SI_NUM_STATS];
40275+ atomic_unchecked_t stats[SI_NUM_STATS];
40276
40277 struct task_struct *thread;
40278
40279@@ -294,9 +294,9 @@ struct smi_info {
40280 };
40281
40282 #define smi_inc_stat(smi, stat) \
40283- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40284+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40285 #define smi_get_stat(smi, stat) \
40286- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40287+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40288
40289 #define SI_MAX_PARMS 4
40290
40291@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40292 atomic_set(&new_smi->req_events, 0);
40293 new_smi->run_to_completion = false;
40294 for (i = 0; i < SI_NUM_STATS; i++)
40295- atomic_set(&new_smi->stats[i], 0);
40296+ atomic_set_unchecked(&new_smi->stats[i], 0);
40297
40298 new_smi->interrupt_disabled = true;
40299 atomic_set(&new_smi->stop_operation, 0);
40300diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40301index 917403f..dddd899 100644
40302--- a/drivers/char/mem.c
40303+++ b/drivers/char/mem.c
40304@@ -18,6 +18,7 @@
40305 #include <linux/raw.h>
40306 #include <linux/tty.h>
40307 #include <linux/capability.h>
40308+#include <linux/security.h>
40309 #include <linux/ptrace.h>
40310 #include <linux/device.h>
40311 #include <linux/highmem.h>
40312@@ -36,6 +37,10 @@
40313
40314 #define DEVPORT_MINOR 4
40315
40316+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40317+extern const struct file_operations grsec_fops;
40318+#endif
40319+
40320 static inline unsigned long size_inside_page(unsigned long start,
40321 unsigned long size)
40322 {
40323@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40324
40325 while (cursor < to) {
40326 if (!devmem_is_allowed(pfn)) {
40327+#ifdef CONFIG_GRKERNSEC_KMEM
40328+ gr_handle_mem_readwrite(from, to);
40329+#else
40330 printk(KERN_INFO
40331 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40332 current->comm, from, to);
40333+#endif
40334 return 0;
40335 }
40336 cursor += PAGE_SIZE;
40337@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40338 }
40339 return 1;
40340 }
40341+#elif defined(CONFIG_GRKERNSEC_KMEM)
40342+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40343+{
40344+ return 0;
40345+}
40346 #else
40347 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40348 {
40349@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40350
40351 while (count > 0) {
40352 unsigned long remaining;
40353+ char *temp;
40354
40355 sz = size_inside_page(p, count);
40356
40357@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40358 if (!ptr)
40359 return -EFAULT;
40360
40361- remaining = copy_to_user(buf, ptr, sz);
40362+#ifdef CONFIG_PAX_USERCOPY
40363+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40364+ if (!temp) {
40365+ unxlate_dev_mem_ptr(p, ptr);
40366+ return -ENOMEM;
40367+ }
40368+ memcpy(temp, ptr, sz);
40369+#else
40370+ temp = ptr;
40371+#endif
40372+
40373+ remaining = copy_to_user(buf, temp, sz);
40374+
40375+#ifdef CONFIG_PAX_USERCOPY
40376+ kfree(temp);
40377+#endif
40378+
40379 unxlate_dev_mem_ptr(p, ptr);
40380 if (remaining)
40381 return -EFAULT;
40382@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40383 size_t count, loff_t *ppos)
40384 {
40385 unsigned long p = *ppos;
40386- ssize_t low_count, read, sz;
40387+ ssize_t low_count, read, sz, err = 0;
40388 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40389- int err = 0;
40390
40391 read = 0;
40392 if (p < (unsigned long) high_memory) {
40393@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40394 }
40395 #endif
40396 while (low_count > 0) {
40397+ char *temp;
40398+
40399 sz = size_inside_page(p, low_count);
40400
40401 /*
40402@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40403 */
40404 kbuf = xlate_dev_kmem_ptr((char *)p);
40405
40406- if (copy_to_user(buf, kbuf, sz))
40407+#ifdef CONFIG_PAX_USERCOPY
40408+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40409+ if (!temp)
40410+ return -ENOMEM;
40411+ memcpy(temp, kbuf, sz);
40412+#else
40413+ temp = kbuf;
40414+#endif
40415+
40416+ err = copy_to_user(buf, temp, sz);
40417+
40418+#ifdef CONFIG_PAX_USERCOPY
40419+ kfree(temp);
40420+#endif
40421+
40422+ if (err)
40423 return -EFAULT;
40424 buf += sz;
40425 p += sz;
40426@@ -827,6 +874,9 @@ static const struct memdev {
40427 #ifdef CONFIG_PRINTK
40428 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40429 #endif
40430+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40431+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40432+#endif
40433 };
40434
40435 static int memory_open(struct inode *inode, struct file *filp)
40436@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40437 continue;
40438
40439 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40440- NULL, devlist[minor].name);
40441+ NULL, "%s", devlist[minor].name);
40442 }
40443
40444 return tty_init();
40445diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40446index 9df78e2..01ba9ae 100644
40447--- a/drivers/char/nvram.c
40448+++ b/drivers/char/nvram.c
40449@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40450
40451 spin_unlock_irq(&rtc_lock);
40452
40453- if (copy_to_user(buf, contents, tmp - contents))
40454+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40455 return -EFAULT;
40456
40457 *ppos = i;
40458diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40459index 0ea9986..e7b07e4 100644
40460--- a/drivers/char/pcmcia/synclink_cs.c
40461+++ b/drivers/char/pcmcia/synclink_cs.c
40462@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40463
40464 if (debug_level >= DEBUG_LEVEL_INFO)
40465 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40466- __FILE__, __LINE__, info->device_name, port->count);
40467+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40468
40469 if (tty_port_close_start(port, tty, filp) == 0)
40470 goto cleanup;
40471@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40472 cleanup:
40473 if (debug_level >= DEBUG_LEVEL_INFO)
40474 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40475- tty->driver->name, port->count);
40476+ tty->driver->name, atomic_read(&port->count));
40477 }
40478
40479 /* Wait until the transmitter is empty.
40480@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40481
40482 if (debug_level >= DEBUG_LEVEL_INFO)
40483 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40484- __FILE__, __LINE__, tty->driver->name, port->count);
40485+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40486
40487 /* If port is closing, signal caller to try again */
40488 if (port->flags & ASYNC_CLOSING){
40489@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40490 goto cleanup;
40491 }
40492 spin_lock(&port->lock);
40493- port->count++;
40494+ atomic_inc(&port->count);
40495 spin_unlock(&port->lock);
40496 spin_unlock_irqrestore(&info->netlock, flags);
40497
40498- if (port->count == 1) {
40499+ if (atomic_read(&port->count) == 1) {
40500 /* 1st open on this device, init hardware */
40501 retval = startup(info, tty);
40502 if (retval < 0)
40503@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40504 unsigned short new_crctype;
40505
40506 /* return error if TTY interface open */
40507- if (info->port.count)
40508+ if (atomic_read(&info->port.count))
40509 return -EBUSY;
40510
40511 switch (encoding)
40512@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
40513
40514 /* arbitrate between network and tty opens */
40515 spin_lock_irqsave(&info->netlock, flags);
40516- if (info->port.count != 0 || info->netcount != 0) {
40517+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40518 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40519 spin_unlock_irqrestore(&info->netlock, flags);
40520 return -EBUSY;
40521@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40522 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40523
40524 /* return error if TTY interface open */
40525- if (info->port.count)
40526+ if (atomic_read(&info->port.count))
40527 return -EBUSY;
40528
40529 if (cmd != SIOCWANDEV)
40530diff --git a/drivers/char/random.c b/drivers/char/random.c
40531index c18d41d..7c499f3 100644
40532--- a/drivers/char/random.c
40533+++ b/drivers/char/random.c
40534@@ -289,9 +289,6 @@
40535 /*
40536 * To allow fractional bits to be tracked, the entropy_count field is
40537 * denominated in units of 1/8th bits.
40538- *
40539- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40540- * credit_entropy_bits() needs to be 64 bits wide.
40541 */
40542 #define ENTROPY_SHIFT 3
40543 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40544@@ -439,9 +436,9 @@ struct entropy_store {
40545 };
40546
40547 static void push_to_pool(struct work_struct *work);
40548-static __u32 input_pool_data[INPUT_POOL_WORDS];
40549-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40550-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40551+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40552+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40553+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40554
40555 static struct entropy_store input_pool = {
40556 .poolinfo = &poolinfo_table[0],
40557@@ -635,7 +632,7 @@ retry:
40558 /* The +2 corresponds to the /4 in the denominator */
40559
40560 do {
40561- unsigned int anfrac = min(pnfrac, pool_size/2);
40562+ u64 anfrac = min(pnfrac, pool_size/2);
40563 unsigned int add =
40564 ((pool_size - entropy_count)*anfrac*3) >> s;
40565
40566@@ -1106,7 +1103,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
40567 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
40568 spin_unlock_irqrestore(&r->lock, flags);
40569
40570- memset(workspace, 0, sizeof(workspace));
40571+ memzero_explicit(workspace, sizeof(workspace));
40572
40573 /*
40574 * In case the hash function has some recognizable output
40575@@ -1118,7 +1115,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
40576 hash.w[2] ^= rol32(hash.w[2], 16);
40577
40578 memcpy(out, &hash, EXTRACT_SIZE);
40579- memset(&hash, 0, sizeof(hash));
40580+ memzero_explicit(&hash, sizeof(hash));
40581 }
40582
40583 /*
40584@@ -1175,7 +1172,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
40585 }
40586
40587 /* Wipe data just returned from memory */
40588- memset(tmp, 0, sizeof(tmp));
40589+ memzero_explicit(tmp, sizeof(tmp));
40590
40591 return ret;
40592 }
40593@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40594
40595 extract_buf(r, tmp);
40596 i = min_t(int, nbytes, EXTRACT_SIZE);
40597- if (copy_to_user(buf, tmp, i)) {
40598+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40599 ret = -EFAULT;
40600 break;
40601 }
40602@@ -1218,7 +1215,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40603 }
40604
40605 /* Wipe data just returned from memory */
40606- memset(tmp, 0, sizeof(tmp));
40607+ memzero_explicit(tmp, sizeof(tmp));
40608
40609 return ret;
40610 }
40611@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
40612 static int proc_do_uuid(struct ctl_table *table, int write,
40613 void __user *buffer, size_t *lenp, loff_t *ppos)
40614 {
40615- struct ctl_table fake_table;
40616+ ctl_table_no_const fake_table;
40617 unsigned char buf[64], tmp_uuid[16], *uuid;
40618
40619 uuid = table->data;
40620@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40621 static int proc_do_entropy(struct ctl_table *table, int write,
40622 void __user *buffer, size_t *lenp, loff_t *ppos)
40623 {
40624- struct ctl_table fake_table;
40625+ ctl_table_no_const fake_table;
40626 int entropy_count;
40627
40628 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
40629diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40630index 7cc1fe22..b602d6b 100644
40631--- a/drivers/char/sonypi.c
40632+++ b/drivers/char/sonypi.c
40633@@ -54,6 +54,7 @@
40634
40635 #include <asm/uaccess.h>
40636 #include <asm/io.h>
40637+#include <asm/local.h>
40638
40639 #include <linux/sonypi.h>
40640
40641@@ -490,7 +491,7 @@ static struct sonypi_device {
40642 spinlock_t fifo_lock;
40643 wait_queue_head_t fifo_proc_list;
40644 struct fasync_struct *fifo_async;
40645- int open_count;
40646+ local_t open_count;
40647 int model;
40648 struct input_dev *input_jog_dev;
40649 struct input_dev *input_key_dev;
40650@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40651 static int sonypi_misc_release(struct inode *inode, struct file *file)
40652 {
40653 mutex_lock(&sonypi_device.lock);
40654- sonypi_device.open_count--;
40655+ local_dec(&sonypi_device.open_count);
40656 mutex_unlock(&sonypi_device.lock);
40657 return 0;
40658 }
40659@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40660 {
40661 mutex_lock(&sonypi_device.lock);
40662 /* Flush input queue on first open */
40663- if (!sonypi_device.open_count)
40664+ if (!local_read(&sonypi_device.open_count))
40665 kfifo_reset(&sonypi_device.fifo);
40666- sonypi_device.open_count++;
40667+ local_inc(&sonypi_device.open_count);
40668 mutex_unlock(&sonypi_device.lock);
40669
40670 return 0;
40671diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40672index 565a947..dcdc06e 100644
40673--- a/drivers/char/tpm/tpm_acpi.c
40674+++ b/drivers/char/tpm/tpm_acpi.c
40675@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40676 virt = acpi_os_map_iomem(start, len);
40677 if (!virt) {
40678 kfree(log->bios_event_log);
40679+ log->bios_event_log = NULL;
40680 printk("%s: ERROR - Unable to map memory\n", __func__);
40681 return -EIO;
40682 }
40683
40684- memcpy_fromio(log->bios_event_log, virt, len);
40685+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40686
40687 acpi_os_unmap_iomem(virt, len);
40688 return 0;
40689diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40690index 3a56a13..f8cbd25 100644
40691--- a/drivers/char/tpm/tpm_eventlog.c
40692+++ b/drivers/char/tpm/tpm_eventlog.c
40693@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40694 event = addr;
40695
40696 if ((event->event_type == 0 && event->event_size == 0) ||
40697- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40698+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40699 return NULL;
40700
40701 return addr;
40702@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40703 return NULL;
40704
40705 if ((event->event_type == 0 && event->event_size == 0) ||
40706- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40707+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40708 return NULL;
40709
40710 (*pos)++;
40711@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40712 int i;
40713
40714 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40715- seq_putc(m, data[i]);
40716+ if (!seq_putc(m, data[i]))
40717+ return -EFAULT;
40718
40719 return 0;
40720 }
40721diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40722index b585b47..488f43e 100644
40723--- a/drivers/char/virtio_console.c
40724+++ b/drivers/char/virtio_console.c
40725@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40726 if (to_user) {
40727 ssize_t ret;
40728
40729- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40730+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40731 if (ret)
40732 return -EFAULT;
40733 } else {
40734@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40735 if (!port_has_data(port) && !port->host_connected)
40736 return 0;
40737
40738- return fill_readbuf(port, ubuf, count, true);
40739+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40740 }
40741
40742 static int wait_port_writable(struct port *port, bool nonblock)
40743diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40744index b9355da..9611f4e 100644
40745--- a/drivers/clk/clk-composite.c
40746+++ b/drivers/clk/clk-composite.c
40747@@ -191,7 +191,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40748 struct clk *clk;
40749 struct clk_init_data init;
40750 struct clk_composite *composite;
40751- struct clk_ops *clk_composite_ops;
40752+ clk_ops_no_const *clk_composite_ops;
40753
40754 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40755 if (!composite) {
40756diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40757index dd3a78c..386d49c 100644
40758--- a/drivers/clk/socfpga/clk-gate.c
40759+++ b/drivers/clk/socfpga/clk-gate.c
40760@@ -22,6 +22,7 @@
40761 #include <linux/mfd/syscon.h>
40762 #include <linux/of.h>
40763 #include <linux/regmap.h>
40764+#include <asm/pgtable.h>
40765
40766 #include "clk.h"
40767
40768@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40769 return 0;
40770 }
40771
40772-static struct clk_ops gateclk_ops = {
40773+static clk_ops_no_const gateclk_ops __read_only = {
40774 .prepare = socfpga_clk_prepare,
40775 .recalc_rate = socfpga_clk_recalc_rate,
40776 .get_parent = socfpga_clk_get_parent,
40777@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40778 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40779 socfpga_clk->hw.bit_idx = clk_gate[1];
40780
40781- gateclk_ops.enable = clk_gate_ops.enable;
40782- gateclk_ops.disable = clk_gate_ops.disable;
40783+ pax_open_kernel();
40784+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40785+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40786+ pax_close_kernel();
40787 }
40788
40789 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40790diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40791index de6da95..c98278b 100644
40792--- a/drivers/clk/socfpga/clk-pll.c
40793+++ b/drivers/clk/socfpga/clk-pll.c
40794@@ -21,6 +21,7 @@
40795 #include <linux/io.h>
40796 #include <linux/of.h>
40797 #include <linux/of_address.h>
40798+#include <asm/pgtable.h>
40799
40800 #include "clk.h"
40801
40802@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40803 CLK_MGR_PLL_CLK_SRC_MASK;
40804 }
40805
40806-static struct clk_ops clk_pll_ops = {
40807+static clk_ops_no_const clk_pll_ops __read_only = {
40808 .recalc_rate = clk_pll_recalc_rate,
40809 .get_parent = clk_pll_get_parent,
40810 };
40811@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40812 pll_clk->hw.hw.init = &init;
40813
40814 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40815- clk_pll_ops.enable = clk_gate_ops.enable;
40816- clk_pll_ops.disable = clk_gate_ops.disable;
40817+ pax_open_kernel();
40818+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40819+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40820+ pax_close_kernel();
40821
40822 clk = clk_register(NULL, &pll_clk->hw.hw);
40823 if (WARN_ON(IS_ERR(clk))) {
40824diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40825index b0c18ed..1713a80 100644
40826--- a/drivers/cpufreq/acpi-cpufreq.c
40827+++ b/drivers/cpufreq/acpi-cpufreq.c
40828@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40829 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40830 per_cpu(acfreq_data, cpu) = data;
40831
40832- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40833- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40834+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40835+ pax_open_kernel();
40836+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40837+ pax_close_kernel();
40838+ }
40839
40840 result = acpi_processor_register_performance(data->acpi_data, cpu);
40841 if (result)
40842@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40843 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40844 break;
40845 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40846- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40847+ pax_open_kernel();
40848+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40849+ pax_close_kernel();
40850 break;
40851 default:
40852 break;
40853@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40854 if (!msrs)
40855 return;
40856
40857- acpi_cpufreq_driver.boost_supported = true;
40858- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40859+ pax_open_kernel();
40860+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40861+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40862+ pax_close_kernel();
40863
40864 cpu_notifier_register_begin();
40865
40866diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40867index 61190f6..fcd899a 100644
40868--- a/drivers/cpufreq/cpufreq.c
40869+++ b/drivers/cpufreq/cpufreq.c
40870@@ -2095,7 +2095,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40871 }
40872
40873 mutex_lock(&cpufreq_governor_mutex);
40874- list_del(&governor->governor_list);
40875+ pax_list_del(&governor->governor_list);
40876 mutex_unlock(&cpufreq_governor_mutex);
40877 return;
40878 }
40879@@ -2311,7 +2311,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40880 return NOTIFY_OK;
40881 }
40882
40883-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40884+static struct notifier_block cpufreq_cpu_notifier = {
40885 .notifier_call = cpufreq_cpu_callback,
40886 };
40887
40888@@ -2351,13 +2351,17 @@ int cpufreq_boost_trigger_state(int state)
40889 return 0;
40890
40891 write_lock_irqsave(&cpufreq_driver_lock, flags);
40892- cpufreq_driver->boost_enabled = state;
40893+ pax_open_kernel();
40894+ *(bool *)&cpufreq_driver->boost_enabled = state;
40895+ pax_close_kernel();
40896 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40897
40898 ret = cpufreq_driver->set_boost(state);
40899 if (ret) {
40900 write_lock_irqsave(&cpufreq_driver_lock, flags);
40901- cpufreq_driver->boost_enabled = !state;
40902+ pax_open_kernel();
40903+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40904+ pax_close_kernel();
40905 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40906
40907 pr_err("%s: Cannot %s BOOST\n",
40908@@ -2414,8 +2418,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40909
40910 pr_debug("trying to register driver %s\n", driver_data->name);
40911
40912- if (driver_data->setpolicy)
40913- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40914+ if (driver_data->setpolicy) {
40915+ pax_open_kernel();
40916+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40917+ pax_close_kernel();
40918+ }
40919
40920 write_lock_irqsave(&cpufreq_driver_lock, flags);
40921 if (cpufreq_driver) {
40922@@ -2430,8 +2437,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40923 * Check if driver provides function to enable boost -
40924 * if not, use cpufreq_boost_set_sw as default
40925 */
40926- if (!cpufreq_driver->set_boost)
40927- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40928+ if (!cpufreq_driver->set_boost) {
40929+ pax_open_kernel();
40930+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40931+ pax_close_kernel();
40932+ }
40933
40934 ret = cpufreq_sysfs_create_file(&boost.attr);
40935 if (ret) {
40936diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40937index 1b44496..b80ff5e 100644
40938--- a/drivers/cpufreq/cpufreq_governor.c
40939+++ b/drivers/cpufreq/cpufreq_governor.c
40940@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40941 struct dbs_data *dbs_data;
40942 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40943 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
40944- struct od_ops *od_ops = NULL;
40945+ const struct od_ops *od_ops = NULL;
40946 struct od_dbs_tuners *od_tuners = NULL;
40947 struct cs_dbs_tuners *cs_tuners = NULL;
40948 struct cpu_dbs_common_info *cpu_cdbs;
40949@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40950
40951 if ((cdata->governor == GOV_CONSERVATIVE) &&
40952 (!policy->governor->initialized)) {
40953- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40954+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40955
40956 cpufreq_register_notifier(cs_ops->notifier_block,
40957 CPUFREQ_TRANSITION_NOTIFIER);
40958@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40959
40960 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
40961 (policy->governor->initialized == 1)) {
40962- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40963+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40964
40965 cpufreq_unregister_notifier(cs_ops->notifier_block,
40966 CPUFREQ_TRANSITION_NOTIFIER);
40967diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
40968index cc401d1..8197340 100644
40969--- a/drivers/cpufreq/cpufreq_governor.h
40970+++ b/drivers/cpufreq/cpufreq_governor.h
40971@@ -212,7 +212,7 @@ struct common_dbs_data {
40972 void (*exit)(struct dbs_data *dbs_data);
40973
40974 /* Governor specific ops, see below */
40975- void *gov_ops;
40976+ const void *gov_ops;
40977 };
40978
40979 /* Governor Per policy data */
40980@@ -232,7 +232,7 @@ struct od_ops {
40981 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
40982 unsigned int freq_next, unsigned int relation);
40983 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
40984-};
40985+} __no_const;
40986
40987 struct cs_ops {
40988 struct notifier_block *notifier_block;
40989diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
40990index ad3f38f..8f086cd 100644
40991--- a/drivers/cpufreq/cpufreq_ondemand.c
40992+++ b/drivers/cpufreq/cpufreq_ondemand.c
40993@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
40994
40995 define_get_cpu_dbs_routines(od_cpu_dbs_info);
40996
40997-static struct od_ops od_ops = {
40998+static struct od_ops od_ops __read_only = {
40999 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
41000 .powersave_bias_target = generic_powersave_bias_target,
41001 .freq_increase = dbs_freq_increase,
41002@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
41003 (struct cpufreq_policy *, unsigned int, unsigned int),
41004 unsigned int powersave_bias)
41005 {
41006- od_ops.powersave_bias_target = f;
41007+ pax_open_kernel();
41008+ *(void **)&od_ops.powersave_bias_target = f;
41009+ pax_close_kernel();
41010 od_set_powersave_bias(powersave_bias);
41011 }
41012 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41013
41014 void od_unregister_powersave_bias_handler(void)
41015 {
41016- od_ops.powersave_bias_target = generic_powersave_bias_target;
41017+ pax_open_kernel();
41018+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41019+ pax_close_kernel();
41020 od_set_powersave_bias(0);
41021 }
41022 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41023diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41024index 0668b38..2f3ea18 100644
41025--- a/drivers/cpufreq/intel_pstate.c
41026+++ b/drivers/cpufreq/intel_pstate.c
41027@@ -120,10 +120,10 @@ struct pstate_funcs {
41028 struct cpu_defaults {
41029 struct pstate_adjust_policy pid_policy;
41030 struct pstate_funcs funcs;
41031-};
41032+} __do_const;
41033
41034 static struct pstate_adjust_policy pid_params;
41035-static struct pstate_funcs pstate_funcs;
41036+static struct pstate_funcs *pstate_funcs;
41037
41038 struct perf_limits {
41039 int no_turbo;
41040@@ -527,17 +527,17 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41041
41042 cpu->pstate.current_pstate = pstate;
41043
41044- pstate_funcs.set(cpu, pstate);
41045+ pstate_funcs->set(cpu, pstate);
41046 }
41047
41048 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41049 {
41050- cpu->pstate.min_pstate = pstate_funcs.get_min();
41051- cpu->pstate.max_pstate = pstate_funcs.get_max();
41052- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41053+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41054+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41055+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41056
41057- if (pstate_funcs.get_vid)
41058- pstate_funcs.get_vid(cpu);
41059+ if (pstate_funcs->get_vid)
41060+ pstate_funcs->get_vid(cpu);
41061 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41062 }
41063
41064@@ -810,9 +810,9 @@ static int intel_pstate_msrs_not_valid(void)
41065 rdmsrl(MSR_IA32_APERF, aperf);
41066 rdmsrl(MSR_IA32_MPERF, mperf);
41067
41068- if (!pstate_funcs.get_max() ||
41069- !pstate_funcs.get_min() ||
41070- !pstate_funcs.get_turbo())
41071+ if (!pstate_funcs->get_max() ||
41072+ !pstate_funcs->get_min() ||
41073+ !pstate_funcs->get_turbo())
41074 return -ENODEV;
41075
41076 rdmsrl(MSR_IA32_APERF, tmp);
41077@@ -826,7 +826,7 @@ static int intel_pstate_msrs_not_valid(void)
41078 return 0;
41079 }
41080
41081-static void copy_pid_params(struct pstate_adjust_policy *policy)
41082+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41083 {
41084 pid_params.sample_rate_ms = policy->sample_rate_ms;
41085 pid_params.p_gain_pct = policy->p_gain_pct;
41086@@ -838,11 +838,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41087
41088 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41089 {
41090- pstate_funcs.get_max = funcs->get_max;
41091- pstate_funcs.get_min = funcs->get_min;
41092- pstate_funcs.get_turbo = funcs->get_turbo;
41093- pstate_funcs.set = funcs->set;
41094- pstate_funcs.get_vid = funcs->get_vid;
41095+ pstate_funcs = funcs;
41096 }
41097
41098 #if IS_ENABLED(CONFIG_ACPI)
41099diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41100index 529cfd9..0e28fff 100644
41101--- a/drivers/cpufreq/p4-clockmod.c
41102+++ b/drivers/cpufreq/p4-clockmod.c
41103@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41104 case 0x0F: /* Core Duo */
41105 case 0x16: /* Celeron Core */
41106 case 0x1C: /* Atom */
41107- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41108+ pax_open_kernel();
41109+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41110+ pax_close_kernel();
41111 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41112 case 0x0D: /* Pentium M (Dothan) */
41113- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41114+ pax_open_kernel();
41115+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41116+ pax_close_kernel();
41117 /* fall through */
41118 case 0x09: /* Pentium M (Banias) */
41119 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41120@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41121
41122 /* on P-4s, the TSC runs with constant frequency independent whether
41123 * throttling is active or not. */
41124- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41125+ pax_open_kernel();
41126+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41127+ pax_close_kernel();
41128
41129 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41130 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41131diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41132index 9bb42ba..b01b4a2 100644
41133--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41134+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41135@@ -18,14 +18,12 @@
41136 #include <asm/head.h>
41137 #include <asm/timer.h>
41138
41139-static struct cpufreq_driver *cpufreq_us3_driver;
41140-
41141 struct us3_freq_percpu_info {
41142 struct cpufreq_frequency_table table[4];
41143 };
41144
41145 /* Indexed by cpu number. */
41146-static struct us3_freq_percpu_info *us3_freq_table;
41147+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41148
41149 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41150 * in the Safari config register.
41151@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41152
41153 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41154 {
41155- if (cpufreq_us3_driver)
41156- us3_freq_target(policy, 0);
41157+ us3_freq_target(policy, 0);
41158
41159 return 0;
41160 }
41161
41162+static int __init us3_freq_init(void);
41163+static void __exit us3_freq_exit(void);
41164+
41165+static struct cpufreq_driver cpufreq_us3_driver = {
41166+ .init = us3_freq_cpu_init,
41167+ .verify = cpufreq_generic_frequency_table_verify,
41168+ .target_index = us3_freq_target,
41169+ .get = us3_freq_get,
41170+ .exit = us3_freq_cpu_exit,
41171+ .name = "UltraSPARC-III",
41172+
41173+};
41174+
41175 static int __init us3_freq_init(void)
41176 {
41177 unsigned long manuf, impl, ver;
41178- int ret;
41179
41180 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41181 return -ENODEV;
41182@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41183 (impl == CHEETAH_IMPL ||
41184 impl == CHEETAH_PLUS_IMPL ||
41185 impl == JAGUAR_IMPL ||
41186- impl == PANTHER_IMPL)) {
41187- struct cpufreq_driver *driver;
41188-
41189- ret = -ENOMEM;
41190- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41191- if (!driver)
41192- goto err_out;
41193-
41194- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41195- GFP_KERNEL);
41196- if (!us3_freq_table)
41197- goto err_out;
41198-
41199- driver->init = us3_freq_cpu_init;
41200- driver->verify = cpufreq_generic_frequency_table_verify;
41201- driver->target_index = us3_freq_target;
41202- driver->get = us3_freq_get;
41203- driver->exit = us3_freq_cpu_exit;
41204- strcpy(driver->name, "UltraSPARC-III");
41205-
41206- cpufreq_us3_driver = driver;
41207- ret = cpufreq_register_driver(driver);
41208- if (ret)
41209- goto err_out;
41210-
41211- return 0;
41212-
41213-err_out:
41214- if (driver) {
41215- kfree(driver);
41216- cpufreq_us3_driver = NULL;
41217- }
41218- kfree(us3_freq_table);
41219- us3_freq_table = NULL;
41220- return ret;
41221- }
41222+ impl == PANTHER_IMPL))
41223+ return cpufreq_register_driver(&cpufreq_us3_driver);
41224
41225 return -ENODEV;
41226 }
41227
41228 static void __exit us3_freq_exit(void)
41229 {
41230- if (cpufreq_us3_driver) {
41231- cpufreq_unregister_driver(cpufreq_us3_driver);
41232- kfree(cpufreq_us3_driver);
41233- cpufreq_us3_driver = NULL;
41234- kfree(us3_freq_table);
41235- us3_freq_table = NULL;
41236- }
41237+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41238 }
41239
41240 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
41241diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41242index 7d4a315..21bb886 100644
41243--- a/drivers/cpufreq/speedstep-centrino.c
41244+++ b/drivers/cpufreq/speedstep-centrino.c
41245@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41246 !cpu_has(cpu, X86_FEATURE_EST))
41247 return -ENODEV;
41248
41249- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41250- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41251+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41252+ pax_open_kernel();
41253+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41254+ pax_close_kernel();
41255+ }
41256
41257 if (policy->cpu != 0)
41258 return -ENODEV;
41259diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41260index e431d11..d0b997e 100644
41261--- a/drivers/cpuidle/driver.c
41262+++ b/drivers/cpuidle/driver.c
41263@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
41264
41265 static void poll_idle_init(struct cpuidle_driver *drv)
41266 {
41267- struct cpuidle_state *state = &drv->states[0];
41268+ cpuidle_state_no_const *state = &drv->states[0];
41269
41270 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41271 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41272diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41273index ca89412..a7b9c49 100644
41274--- a/drivers/cpuidle/governor.c
41275+++ b/drivers/cpuidle/governor.c
41276@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41277 mutex_lock(&cpuidle_lock);
41278 if (__cpuidle_find_governor(gov->name) == NULL) {
41279 ret = 0;
41280- list_add_tail(&gov->governor_list, &cpuidle_governors);
41281+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41282 if (!cpuidle_curr_governor ||
41283 cpuidle_curr_governor->rating < gov->rating)
41284 cpuidle_switch_governor(gov);
41285diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41286index 97c5903..023ad23 100644
41287--- a/drivers/cpuidle/sysfs.c
41288+++ b/drivers/cpuidle/sysfs.c
41289@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41290 NULL
41291 };
41292
41293-static struct attribute_group cpuidle_attr_group = {
41294+static attribute_group_no_const cpuidle_attr_group = {
41295 .attrs = cpuidle_default_attrs,
41296 .name = "cpuidle",
41297 };
41298diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41299index 8d2a772..33826c9 100644
41300--- a/drivers/crypto/hifn_795x.c
41301+++ b/drivers/crypto/hifn_795x.c
41302@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41303 MODULE_PARM_DESC(hifn_pll_ref,
41304 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41305
41306-static atomic_t hifn_dev_number;
41307+static atomic_unchecked_t hifn_dev_number;
41308
41309 #define ACRYPTO_OP_DECRYPT 0
41310 #define ACRYPTO_OP_ENCRYPT 1
41311@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41312 goto err_out_disable_pci_device;
41313
41314 snprintf(name, sizeof(name), "hifn%d",
41315- atomic_inc_return(&hifn_dev_number)-1);
41316+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41317
41318 err = pci_request_regions(pdev, name);
41319 if (err)
41320diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41321index 9f90369..bfcacdb 100644
41322--- a/drivers/devfreq/devfreq.c
41323+++ b/drivers/devfreq/devfreq.c
41324@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41325 goto err_out;
41326 }
41327
41328- list_add(&governor->node, &devfreq_governor_list);
41329+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41330
41331 list_for_each_entry(devfreq, &devfreq_list, node) {
41332 int ret = 0;
41333@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41334 }
41335 }
41336
41337- list_del(&governor->node);
41338+ pax_list_del((struct list_head *)&governor->node);
41339 err_out:
41340 mutex_unlock(&devfreq_list_lock);
41341
41342diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41343index 42d4974..2714f36 100644
41344--- a/drivers/dma/sh/shdma-base.c
41345+++ b/drivers/dma/sh/shdma-base.c
41346@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41347 schan->slave_id = -EINVAL;
41348 }
41349
41350- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41351- sdev->desc_size, GFP_KERNEL);
41352+ schan->desc = kcalloc(sdev->desc_size,
41353+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41354 if (!schan->desc) {
41355 ret = -ENOMEM;
41356 goto edescalloc;
41357diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41358index 58eb857..d7e42c8 100644
41359--- a/drivers/dma/sh/shdmac.c
41360+++ b/drivers/dma/sh/shdmac.c
41361@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41362 return ret;
41363 }
41364
41365-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41366+static struct notifier_block sh_dmae_nmi_notifier = {
41367 .notifier_call = sh_dmae_nmi_handler,
41368
41369 /* Run before NMI debug handler and KGDB */
41370diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41371index 592af5f..bb1d583 100644
41372--- a/drivers/edac/edac_device.c
41373+++ b/drivers/edac/edac_device.c
41374@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41375 */
41376 int edac_device_alloc_index(void)
41377 {
41378- static atomic_t device_indexes = ATOMIC_INIT(0);
41379+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41380
41381- return atomic_inc_return(&device_indexes) - 1;
41382+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41383 }
41384 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41385
41386diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41387index a6cd361..7bdbf53 100644
41388--- a/drivers/edac/edac_mc_sysfs.c
41389+++ b/drivers/edac/edac_mc_sysfs.c
41390@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
41391 struct dev_ch_attribute {
41392 struct device_attribute attr;
41393 int channel;
41394-};
41395+} __do_const;
41396
41397 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41398 struct dev_ch_attribute dev_attr_legacy_##_name = \
41399@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41400 }
41401
41402 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41403+ pax_open_kernel();
41404 if (mci->get_sdram_scrub_rate) {
41405- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41406- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41407+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41408+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41409 }
41410 if (mci->set_sdram_scrub_rate) {
41411- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41412- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41413+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41414+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41415 }
41416+ pax_close_kernel();
41417 err = device_create_file(&mci->dev,
41418 &dev_attr_sdram_scrub_rate);
41419 if (err) {
41420diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41421index 2cf44b4d..6dd2dc7 100644
41422--- a/drivers/edac/edac_pci.c
41423+++ b/drivers/edac/edac_pci.c
41424@@ -29,7 +29,7 @@
41425
41426 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41427 static LIST_HEAD(edac_pci_list);
41428-static atomic_t pci_indexes = ATOMIC_INIT(0);
41429+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41430
41431 /*
41432 * edac_pci_alloc_ctl_info
41433@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41434 */
41435 int edac_pci_alloc_index(void)
41436 {
41437- return atomic_inc_return(&pci_indexes) - 1;
41438+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41439 }
41440 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41441
41442diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41443index e8658e4..22746d6 100644
41444--- a/drivers/edac/edac_pci_sysfs.c
41445+++ b/drivers/edac/edac_pci_sysfs.c
41446@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41447 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41448 static int edac_pci_poll_msec = 1000; /* one second workq period */
41449
41450-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41451-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41452+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41453+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41454
41455 static struct kobject *edac_pci_top_main_kobj;
41456 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41457@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41458 void *value;
41459 ssize_t(*show) (void *, char *);
41460 ssize_t(*store) (void *, const char *, size_t);
41461-};
41462+} __do_const;
41463
41464 /* Set of show/store abstract level functions for PCI Parity object */
41465 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41466@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41467 edac_printk(KERN_CRIT, EDAC_PCI,
41468 "Signaled System Error on %s\n",
41469 pci_name(dev));
41470- atomic_inc(&pci_nonparity_count);
41471+ atomic_inc_unchecked(&pci_nonparity_count);
41472 }
41473
41474 if (status & (PCI_STATUS_PARITY)) {
41475@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41476 "Master Data Parity Error on %s\n",
41477 pci_name(dev));
41478
41479- atomic_inc(&pci_parity_count);
41480+ atomic_inc_unchecked(&pci_parity_count);
41481 }
41482
41483 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41484@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41485 "Detected Parity Error on %s\n",
41486 pci_name(dev));
41487
41488- atomic_inc(&pci_parity_count);
41489+ atomic_inc_unchecked(&pci_parity_count);
41490 }
41491 }
41492
41493@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41494 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41495 "Signaled System Error on %s\n",
41496 pci_name(dev));
41497- atomic_inc(&pci_nonparity_count);
41498+ atomic_inc_unchecked(&pci_nonparity_count);
41499 }
41500
41501 if (status & (PCI_STATUS_PARITY)) {
41502@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41503 "Master Data Parity Error on "
41504 "%s\n", pci_name(dev));
41505
41506- atomic_inc(&pci_parity_count);
41507+ atomic_inc_unchecked(&pci_parity_count);
41508 }
41509
41510 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41511@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41512 "Detected Parity Error on %s\n",
41513 pci_name(dev));
41514
41515- atomic_inc(&pci_parity_count);
41516+ atomic_inc_unchecked(&pci_parity_count);
41517 }
41518 }
41519 }
41520@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41521 if (!check_pci_errors)
41522 return;
41523
41524- before_count = atomic_read(&pci_parity_count);
41525+ before_count = atomic_read_unchecked(&pci_parity_count);
41526
41527 /* scan all PCI devices looking for a Parity Error on devices and
41528 * bridges.
41529@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41530 /* Only if operator has selected panic on PCI Error */
41531 if (edac_pci_get_panic_on_pe()) {
41532 /* If the count is different 'after' from 'before' */
41533- if (before_count != atomic_read(&pci_parity_count))
41534+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41535 panic("EDAC: PCI Parity Error");
41536 }
41537 }
41538diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41539index 51b7e3a..aa8a3e8 100644
41540--- a/drivers/edac/mce_amd.h
41541+++ b/drivers/edac/mce_amd.h
41542@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41543 bool (*mc0_mce)(u16, u8);
41544 bool (*mc1_mce)(u16, u8);
41545 bool (*mc2_mce)(u16, u8);
41546-};
41547+} __no_const;
41548
41549 void amd_report_gart_errors(bool);
41550 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41551diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41552index 57ea7f4..af06b76 100644
41553--- a/drivers/firewire/core-card.c
41554+++ b/drivers/firewire/core-card.c
41555@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41556 const struct fw_card_driver *driver,
41557 struct device *device)
41558 {
41559- static atomic_t index = ATOMIC_INIT(-1);
41560+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41561
41562- card->index = atomic_inc_return(&index);
41563+ card->index = atomic_inc_return_unchecked(&index);
41564 card->driver = driver;
41565 card->device = device;
41566 card->current_tlabel = 0;
41567@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41568
41569 void fw_core_remove_card(struct fw_card *card)
41570 {
41571- struct fw_card_driver dummy_driver = dummy_driver_template;
41572+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41573
41574 card->driver->update_phy_reg(card, 4,
41575 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41576diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41577index 2c6d5e1..a2cca6b 100644
41578--- a/drivers/firewire/core-device.c
41579+++ b/drivers/firewire/core-device.c
41580@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41581 struct config_rom_attribute {
41582 struct device_attribute attr;
41583 u32 key;
41584-};
41585+} __do_const;
41586
41587 static ssize_t show_immediate(struct device *dev,
41588 struct device_attribute *dattr, char *buf)
41589diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41590index eb6935c..3cc2bfa 100644
41591--- a/drivers/firewire/core-transaction.c
41592+++ b/drivers/firewire/core-transaction.c
41593@@ -38,6 +38,7 @@
41594 #include <linux/timer.h>
41595 #include <linux/types.h>
41596 #include <linux/workqueue.h>
41597+#include <linux/sched.h>
41598
41599 #include <asm/byteorder.h>
41600
41601diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41602index e1480ff6..1a429bd 100644
41603--- a/drivers/firewire/core.h
41604+++ b/drivers/firewire/core.h
41605@@ -111,6 +111,7 @@ struct fw_card_driver {
41606
41607 int (*stop_iso)(struct fw_iso_context *ctx);
41608 };
41609+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41610
41611 void fw_card_initialize(struct fw_card *card,
41612 const struct fw_card_driver *driver, struct device *device);
41613diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41614index a66a321..f6caf20 100644
41615--- a/drivers/firewire/ohci.c
41616+++ b/drivers/firewire/ohci.c
41617@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41618 be32_to_cpu(ohci->next_header));
41619 }
41620
41621+#ifndef CONFIG_GRKERNSEC
41622 if (param_remote_dma) {
41623 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41624 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41625 }
41626+#endif
41627
41628 spin_unlock_irq(&ohci->lock);
41629
41630@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41631 unsigned long flags;
41632 int n, ret = 0;
41633
41634+#ifndef CONFIG_GRKERNSEC
41635 if (param_remote_dma)
41636 return 0;
41637+#endif
41638
41639 /*
41640 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41641diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41642index 94a58a0..f5eba42 100644
41643--- a/drivers/firmware/dmi-id.c
41644+++ b/drivers/firmware/dmi-id.c
41645@@ -16,7 +16,7 @@
41646 struct dmi_device_attribute{
41647 struct device_attribute dev_attr;
41648 int field;
41649-};
41650+} __do_const;
41651 #define to_dmi_dev_attr(_dev_attr) \
41652 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41653
41654diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41655index 17afc51..0ef90cd 100644
41656--- a/drivers/firmware/dmi_scan.c
41657+++ b/drivers/firmware/dmi_scan.c
41658@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41659 if (buf == NULL)
41660 return -1;
41661
41662- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41663+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41664
41665 dmi_unmap(buf);
41666 return 0;
41667diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41668index 5b53d61..72cee96 100644
41669--- a/drivers/firmware/efi/cper.c
41670+++ b/drivers/firmware/efi/cper.c
41671@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
41672 */
41673 u64 cper_next_record_id(void)
41674 {
41675- static atomic64_t seq;
41676+ static atomic64_unchecked_t seq;
41677
41678- if (!atomic64_read(&seq))
41679- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41680+ if (!atomic64_read_unchecked(&seq))
41681+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41682
41683- return atomic64_inc_return(&seq);
41684+ return atomic64_inc_return_unchecked(&seq);
41685 }
41686 EXPORT_SYMBOL_GPL(cper_next_record_id);
41687
41688diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41689index 64ecbb5..d921eb3 100644
41690--- a/drivers/firmware/efi/efi.c
41691+++ b/drivers/firmware/efi/efi.c
41692@@ -126,14 +126,16 @@ static struct attribute_group efi_subsys_attr_group = {
41693 };
41694
41695 static struct efivars generic_efivars;
41696-static struct efivar_operations generic_ops;
41697+static efivar_operations_no_const generic_ops __read_only;
41698
41699 static int generic_ops_register(void)
41700 {
41701- generic_ops.get_variable = efi.get_variable;
41702- generic_ops.set_variable = efi.set_variable;
41703- generic_ops.get_next_variable = efi.get_next_variable;
41704- generic_ops.query_variable_store = efi_query_variable_store;
41705+ pax_open_kernel();
41706+ *(void **)&generic_ops.get_variable = efi.get_variable;
41707+ *(void **)&generic_ops.set_variable = efi.set_variable;
41708+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41709+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41710+ pax_close_kernel();
41711
41712 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41713 }
41714diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41715index f256ecd..387dcb1 100644
41716--- a/drivers/firmware/efi/efivars.c
41717+++ b/drivers/firmware/efi/efivars.c
41718@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41719 static int
41720 create_efivars_bin_attributes(void)
41721 {
41722- struct bin_attribute *attr;
41723+ bin_attribute_no_const *attr;
41724 int error;
41725
41726 /* new_var */
41727diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41728index 2f569aa..c95f4fb 100644
41729--- a/drivers/firmware/google/memconsole.c
41730+++ b/drivers/firmware/google/memconsole.c
41731@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41732 if (!found_memconsole())
41733 return -ENODEV;
41734
41735- memconsole_bin_attr.size = memconsole_length;
41736+ pax_open_kernel();
41737+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41738+ pax_close_kernel();
41739+
41740 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41741 }
41742
41743diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41744index fe49ec3..1ade794 100644
41745--- a/drivers/gpio/gpio-em.c
41746+++ b/drivers/gpio/gpio-em.c
41747@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41748 struct em_gio_priv *p;
41749 struct resource *io[2], *irq[2];
41750 struct gpio_chip *gpio_chip;
41751- struct irq_chip *irq_chip;
41752+ irq_chip_no_const *irq_chip;
41753 const char *name = dev_name(&pdev->dev);
41754 int ret;
41755
41756diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41757index 3784e81..73637b5 100644
41758--- a/drivers/gpio/gpio-ich.c
41759+++ b/drivers/gpio/gpio-ich.c
41760@@ -94,7 +94,7 @@ struct ichx_desc {
41761 * this option allows driver caching written output values
41762 */
41763 bool use_outlvl_cache;
41764-};
41765+} __do_const;
41766
41767 static struct {
41768 spinlock_t lock;
41769diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41770index bf6c094..6573caf 100644
41771--- a/drivers/gpio/gpio-rcar.c
41772+++ b/drivers/gpio/gpio-rcar.c
41773@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41774 struct gpio_rcar_priv *p;
41775 struct resource *io, *irq;
41776 struct gpio_chip *gpio_chip;
41777- struct irq_chip *irq_chip;
41778+ irq_chip_no_const *irq_chip;
41779 struct device *dev = &pdev->dev;
41780 const char *name = dev_name(dev);
41781 int ret;
41782diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41783index dbf28fa..04dad4e 100644
41784--- a/drivers/gpio/gpio-vr41xx.c
41785+++ b/drivers/gpio/gpio-vr41xx.c
41786@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41787 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41788 maskl, pendl, maskh, pendh);
41789
41790- atomic_inc(&irq_err_count);
41791+ atomic_inc_unchecked(&irq_err_count);
41792
41793 return -EINVAL;
41794 }
41795diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41796index c68d037..2f4f9a9 100644
41797--- a/drivers/gpio/gpiolib.c
41798+++ b/drivers/gpio/gpiolib.c
41799@@ -529,8 +529,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41800 }
41801
41802 if (gpiochip->irqchip) {
41803- gpiochip->irqchip->irq_request_resources = NULL;
41804- gpiochip->irqchip->irq_release_resources = NULL;
41805+ pax_open_kernel();
41806+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41807+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41808+ pax_close_kernel();
41809 gpiochip->irqchip = NULL;
41810 }
41811 }
41812@@ -596,8 +598,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41813 gpiochip->irqchip = NULL;
41814 return -EINVAL;
41815 }
41816- irqchip->irq_request_resources = gpiochip_irq_reqres;
41817- irqchip->irq_release_resources = gpiochip_irq_relres;
41818+
41819+ pax_open_kernel();
41820+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41821+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41822+ pax_close_kernel();
41823
41824 /*
41825 * Prepare the mapping since the irqchip shall be orthogonal to
41826diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41827index 90e7730..3b41807 100644
41828--- a/drivers/gpu/drm/drm_crtc.c
41829+++ b/drivers/gpu/drm/drm_crtc.c
41830@@ -3861,7 +3861,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41831 goto done;
41832 }
41833
41834- if (copy_to_user(&enum_ptr[copied].name,
41835+ if (copy_to_user(enum_ptr[copied].name,
41836 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41837 ret = -EFAULT;
41838 goto done;
41839diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41840index 3242e20..7e4f621 100644
41841--- a/drivers/gpu/drm/drm_drv.c
41842+++ b/drivers/gpu/drm/drm_drv.c
41843@@ -463,7 +463,7 @@ void drm_unplug_dev(struct drm_device *dev)
41844
41845 drm_device_set_unplugged(dev);
41846
41847- if (dev->open_count == 0) {
41848+ if (local_read(&dev->open_count) == 0) {
41849 drm_put_dev(dev);
41850 }
41851 mutex_unlock(&drm_global_mutex);
41852diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41853index 79d5221..7ff73496 100644
41854--- a/drivers/gpu/drm/drm_fops.c
41855+++ b/drivers/gpu/drm/drm_fops.c
41856@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
41857 return PTR_ERR(minor);
41858
41859 dev = minor->dev;
41860- if (!dev->open_count++)
41861+ if (local_inc_return(&dev->open_count) == 1)
41862 need_setup = 1;
41863
41864 /* share address_space across all char-devs of a single device */
41865@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
41866 return 0;
41867
41868 err_undo:
41869- dev->open_count--;
41870+ local_dec(&dev->open_count);
41871 drm_minor_release(minor);
41872 return retcode;
41873 }
41874@@ -384,7 +384,7 @@ int drm_release(struct inode *inode, struct file *filp)
41875
41876 mutex_lock(&drm_global_mutex);
41877
41878- DRM_DEBUG("open_count = %d\n", dev->open_count);
41879+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41880
41881 mutex_lock(&dev->struct_mutex);
41882 list_del(&file_priv->lhead);
41883@@ -397,10 +397,10 @@ int drm_release(struct inode *inode, struct file *filp)
41884 * Begin inline drm_release
41885 */
41886
41887- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41888+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41889 task_pid_nr(current),
41890 (long)old_encode_dev(file_priv->minor->kdev->devt),
41891- dev->open_count);
41892+ local_read(&dev->open_count));
41893
41894 /* Release any auth tokens that might point to this file_priv,
41895 (do that under the drm_global_mutex) */
41896@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
41897 * End inline drm_release
41898 */
41899
41900- if (!--dev->open_count) {
41901+ if (local_dec_and_test(&dev->open_count)) {
41902 retcode = drm_lastclose(dev);
41903 if (drm_device_is_unplugged(dev))
41904 drm_put_dev(dev);
41905diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41906index 3d2e91c..d31c4c9 100644
41907--- a/drivers/gpu/drm/drm_global.c
41908+++ b/drivers/gpu/drm/drm_global.c
41909@@ -36,7 +36,7 @@
41910 struct drm_global_item {
41911 struct mutex mutex;
41912 void *object;
41913- int refcount;
41914+ atomic_t refcount;
41915 };
41916
41917 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41918@@ -49,7 +49,7 @@ void drm_global_init(void)
41919 struct drm_global_item *item = &glob[i];
41920 mutex_init(&item->mutex);
41921 item->object = NULL;
41922- item->refcount = 0;
41923+ atomic_set(&item->refcount, 0);
41924 }
41925 }
41926
41927@@ -59,7 +59,7 @@ void drm_global_release(void)
41928 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41929 struct drm_global_item *item = &glob[i];
41930 BUG_ON(item->object != NULL);
41931- BUG_ON(item->refcount != 0);
41932+ BUG_ON(atomic_read(&item->refcount) != 0);
41933 }
41934 }
41935
41936@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41937 struct drm_global_item *item = &glob[ref->global_type];
41938
41939 mutex_lock(&item->mutex);
41940- if (item->refcount == 0) {
41941+ if (atomic_read(&item->refcount) == 0) {
41942 item->object = kzalloc(ref->size, GFP_KERNEL);
41943 if (unlikely(item->object == NULL)) {
41944 ret = -ENOMEM;
41945@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41946 goto out_err;
41947
41948 }
41949- ++item->refcount;
41950+ atomic_inc(&item->refcount);
41951 ref->object = item->object;
41952 mutex_unlock(&item->mutex);
41953 return 0;
41954@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41955 struct drm_global_item *item = &glob[ref->global_type];
41956
41957 mutex_lock(&item->mutex);
41958- BUG_ON(item->refcount == 0);
41959+ BUG_ON(atomic_read(&item->refcount) == 0);
41960 BUG_ON(ref->object != item->object);
41961- if (--item->refcount == 0) {
41962+ if (atomic_dec_and_test(&item->refcount)) {
41963 ref->release(ref);
41964 item->object = NULL;
41965 }
41966diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41967index ecaf0fa..a49cee9 100644
41968--- a/drivers/gpu/drm/drm_info.c
41969+++ b/drivers/gpu/drm/drm_info.c
41970@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41971 struct drm_local_map *map;
41972 struct drm_map_list *r_list;
41973
41974- /* Hardcoded from _DRM_FRAME_BUFFER,
41975- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41976- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41977- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41978+ static const char * const types[] = {
41979+ [_DRM_FRAME_BUFFER] = "FB",
41980+ [_DRM_REGISTERS] = "REG",
41981+ [_DRM_SHM] = "SHM",
41982+ [_DRM_AGP] = "AGP",
41983+ [_DRM_SCATTER_GATHER] = "SG",
41984+ [_DRM_CONSISTENT] = "PCI"};
41985 const char *type;
41986 int i;
41987
41988@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41989 map = r_list->map;
41990 if (!map)
41991 continue;
41992- if (map->type < 0 || map->type > 5)
41993+ if (map->type >= ARRAY_SIZE(types))
41994 type = "??";
41995 else
41996 type = types[map->type];
41997@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
41998 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
41999 vma->vm_flags & VM_LOCKED ? 'l' : '-',
42000 vma->vm_flags & VM_IO ? 'i' : '-',
42001+#ifdef CONFIG_GRKERNSEC_HIDESYM
42002+ 0);
42003+#else
42004 vma->vm_pgoff);
42005+#endif
42006
42007 #if defined(__i386__)
42008 pgprot = pgprot_val(vma->vm_page_prot);
42009diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
42010index 2f4c4343..dd12cd2 100644
42011--- a/drivers/gpu/drm/drm_ioc32.c
42012+++ b/drivers/gpu/drm/drm_ioc32.c
42013@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
42014 request = compat_alloc_user_space(nbytes);
42015 if (!access_ok(VERIFY_WRITE, request, nbytes))
42016 return -EFAULT;
42017- list = (struct drm_buf_desc *) (request + 1);
42018+ list = (struct drm_buf_desc __user *) (request + 1);
42019
42020 if (__put_user(count, &request->count)
42021 || __put_user(list, &request->list))
42022@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
42023 request = compat_alloc_user_space(nbytes);
42024 if (!access_ok(VERIFY_WRITE, request, nbytes))
42025 return -EFAULT;
42026- list = (struct drm_buf_pub *) (request + 1);
42027+ list = (struct drm_buf_pub __user *) (request + 1);
42028
42029 if (__put_user(count, &request->count)
42030 || __put_user(list, &request->list))
42031@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
42032 return 0;
42033 }
42034
42035-drm_ioctl_compat_t *drm_compat_ioctls[] = {
42036+drm_ioctl_compat_t drm_compat_ioctls[] = {
42037 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
42038 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
42039 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
42040@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
42041 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42042 {
42043 unsigned int nr = DRM_IOCTL_NR(cmd);
42044- drm_ioctl_compat_t *fn;
42045 int ret;
42046
42047 /* Assume that ioctls without an explicit compat routine will just
42048@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42049 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
42050 return drm_ioctl(filp, cmd, arg);
42051
42052- fn = drm_compat_ioctls[nr];
42053-
42054- if (fn != NULL)
42055- ret = (*fn) (filp, cmd, arg);
42056+ if (drm_compat_ioctls[nr] != NULL)
42057+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
42058 else
42059 ret = drm_ioctl(filp, cmd, arg);
42060
42061diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
42062index 40be746..fd78faf 100644
42063--- a/drivers/gpu/drm/drm_ioctl.c
42064+++ b/drivers/gpu/drm/drm_ioctl.c
42065@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
42066 struct drm_file *file_priv = filp->private_data;
42067 struct drm_device *dev;
42068 const struct drm_ioctl_desc *ioctl = NULL;
42069- drm_ioctl_t *func;
42070+ drm_ioctl_no_const_t func;
42071 unsigned int nr = DRM_IOCTL_NR(cmd);
42072 int retcode = -EINVAL;
42073 char stack_kdata[128];
42074diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
42075index d4d16ed..8fb0b51 100644
42076--- a/drivers/gpu/drm/i810/i810_drv.h
42077+++ b/drivers/gpu/drm/i810/i810_drv.h
42078@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
42079 int page_flipping;
42080
42081 wait_queue_head_t irq_queue;
42082- atomic_t irq_received;
42083- atomic_t irq_emitted;
42084+ atomic_unchecked_t irq_received;
42085+ atomic_unchecked_t irq_emitted;
42086
42087 int front_offset;
42088 } drm_i810_private_t;
42089diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
42090index 9933c26..32cc097 100644
42091--- a/drivers/gpu/drm/i915/i915_dma.c
42092+++ b/drivers/gpu/drm/i915/i915_dma.c
42093@@ -1292,7 +1292,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
42094 * locking inversion with the driver load path. And the access here is
42095 * completely racy anyway. So don't bother with locking for now.
42096 */
42097- return dev->open_count == 0;
42098+ return local_read(&dev->open_count) == 0;
42099 }
42100
42101 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
42102diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42103index 60998fc..3b244bc 100644
42104--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42105+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42106@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
42107
42108 static int
42109 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
42110- int count)
42111+ unsigned int count)
42112 {
42113- int i;
42114+ unsigned int i;
42115 unsigned relocs_total = 0;
42116 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
42117
42118diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
42119index 2e0613e..a8b94d9 100644
42120--- a/drivers/gpu/drm/i915/i915_ioc32.c
42121+++ b/drivers/gpu/drm/i915/i915_ioc32.c
42122@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
42123 (unsigned long)request);
42124 }
42125
42126-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42127+static drm_ioctl_compat_t i915_compat_ioctls[] = {
42128 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
42129 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
42130 [DRM_I915_GETPARAM] = compat_i915_getparam,
42131@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42132 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42133 {
42134 unsigned int nr = DRM_IOCTL_NR(cmd);
42135- drm_ioctl_compat_t *fn = NULL;
42136 int ret;
42137
42138 if (nr < DRM_COMMAND_BASE)
42139 return drm_compat_ioctl(filp, cmd, arg);
42140
42141- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
42142- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42143-
42144- if (fn != NULL)
42145+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
42146+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42147 ret = (*fn) (filp, cmd, arg);
42148- else
42149+ } else
42150 ret = drm_ioctl(filp, cmd, arg);
42151
42152 return ret;
42153diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
42154index d8324c6..fc9b704 100644
42155--- a/drivers/gpu/drm/i915/intel_display.c
42156+++ b/drivers/gpu/drm/i915/intel_display.c
42157@@ -12437,13 +12437,13 @@ struct intel_quirk {
42158 int subsystem_vendor;
42159 int subsystem_device;
42160 void (*hook)(struct drm_device *dev);
42161-};
42162+} __do_const;
42163
42164 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
42165 struct intel_dmi_quirk {
42166 void (*hook)(struct drm_device *dev);
42167 const struct dmi_system_id (*dmi_id_list)[];
42168-};
42169+} __do_const;
42170
42171 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42172 {
42173@@ -12451,18 +12451,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42174 return 1;
42175 }
42176
42177-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42178+static const struct dmi_system_id intel_dmi_quirks_table[] = {
42179 {
42180- .dmi_id_list = &(const struct dmi_system_id[]) {
42181- {
42182- .callback = intel_dmi_reverse_brightness,
42183- .ident = "NCR Corporation",
42184- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42185- DMI_MATCH(DMI_PRODUCT_NAME, ""),
42186- },
42187- },
42188- { } /* terminating entry */
42189+ .callback = intel_dmi_reverse_brightness,
42190+ .ident = "NCR Corporation",
42191+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42192+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
42193 },
42194+ },
42195+ { } /* terminating entry */
42196+};
42197+
42198+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42199+ {
42200+ .dmi_id_list = &intel_dmi_quirks_table,
42201 .hook = quirk_invert_brightness,
42202 },
42203 };
42204diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
42205index fe45321..836fdca 100644
42206--- a/drivers/gpu/drm/mga/mga_drv.h
42207+++ b/drivers/gpu/drm/mga/mga_drv.h
42208@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
42209 u32 clear_cmd;
42210 u32 maccess;
42211
42212- atomic_t vbl_received; /**< Number of vblanks received. */
42213+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
42214 wait_queue_head_t fence_queue;
42215- atomic_t last_fence_retired;
42216+ atomic_unchecked_t last_fence_retired;
42217 u32 next_fence_to_post;
42218
42219 unsigned int fb_cpp;
42220diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
42221index 729bfd5..ead8823 100644
42222--- a/drivers/gpu/drm/mga/mga_ioc32.c
42223+++ b/drivers/gpu/drm/mga/mga_ioc32.c
42224@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
42225 return 0;
42226 }
42227
42228-drm_ioctl_compat_t *mga_compat_ioctls[] = {
42229+drm_ioctl_compat_t mga_compat_ioctls[] = {
42230 [DRM_MGA_INIT] = compat_mga_init,
42231 [DRM_MGA_GETPARAM] = compat_mga_getparam,
42232 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
42233@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
42234 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42235 {
42236 unsigned int nr = DRM_IOCTL_NR(cmd);
42237- drm_ioctl_compat_t *fn = NULL;
42238 int ret;
42239
42240 if (nr < DRM_COMMAND_BASE)
42241 return drm_compat_ioctl(filp, cmd, arg);
42242
42243- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
42244- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42245-
42246- if (fn != NULL)
42247+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
42248+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42249 ret = (*fn) (filp, cmd, arg);
42250- else
42251+ } else
42252 ret = drm_ioctl(filp, cmd, arg);
42253
42254 return ret;
42255diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
42256index 1b071b8..de8601a 100644
42257--- a/drivers/gpu/drm/mga/mga_irq.c
42258+++ b/drivers/gpu/drm/mga/mga_irq.c
42259@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
42260 if (crtc != 0)
42261 return 0;
42262
42263- return atomic_read(&dev_priv->vbl_received);
42264+ return atomic_read_unchecked(&dev_priv->vbl_received);
42265 }
42266
42267
42268@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42269 /* VBLANK interrupt */
42270 if (status & MGA_VLINEPEN) {
42271 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
42272- atomic_inc(&dev_priv->vbl_received);
42273+ atomic_inc_unchecked(&dev_priv->vbl_received);
42274 drm_handle_vblank(dev, 0);
42275 handled = 1;
42276 }
42277@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42278 if ((prim_start & ~0x03) != (prim_end & ~0x03))
42279 MGA_WRITE(MGA_PRIMEND, prim_end);
42280
42281- atomic_inc(&dev_priv->last_fence_retired);
42282+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
42283 wake_up(&dev_priv->fence_queue);
42284 handled = 1;
42285 }
42286@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
42287 * using fences.
42288 */
42289 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
42290- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
42291+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
42292 - *sequence) <= (1 << 23)));
42293
42294 *sequence = cur_fence;
42295diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
42296index dae2c96..324dbe4 100644
42297--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
42298+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
42299@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
42300 struct bit_table {
42301 const char id;
42302 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
42303-};
42304+} __no_const;
42305
42306 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
42307
42308diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
42309index b02b024..aed7bad 100644
42310--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
42311+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
42312@@ -119,7 +119,6 @@ struct nouveau_drm {
42313 struct drm_global_reference mem_global_ref;
42314 struct ttm_bo_global_ref bo_global_ref;
42315 struct ttm_bo_device bdev;
42316- atomic_t validate_sequence;
42317 int (*move)(struct nouveau_channel *,
42318 struct ttm_buffer_object *,
42319 struct ttm_mem_reg *, struct ttm_mem_reg *);
42320diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42321index 462679a..88e32a7 100644
42322--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42323+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42324@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
42325 unsigned long arg)
42326 {
42327 unsigned int nr = DRM_IOCTL_NR(cmd);
42328- drm_ioctl_compat_t *fn = NULL;
42329+ drm_ioctl_compat_t fn = NULL;
42330 int ret;
42331
42332 if (nr < DRM_COMMAND_BASE)
42333diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42334index 53874b7..1db0a68 100644
42335--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
42336+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42337@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42338 }
42339
42340 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
42341- nouveau_vram_manager_init,
42342- nouveau_vram_manager_fini,
42343- nouveau_vram_manager_new,
42344- nouveau_vram_manager_del,
42345- nouveau_vram_manager_debug
42346+ .init = nouveau_vram_manager_init,
42347+ .takedown = nouveau_vram_manager_fini,
42348+ .get_node = nouveau_vram_manager_new,
42349+ .put_node = nouveau_vram_manager_del,
42350+ .debug = nouveau_vram_manager_debug
42351 };
42352
42353 static int
42354@@ -196,11 +196,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42355 }
42356
42357 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
42358- nouveau_gart_manager_init,
42359- nouveau_gart_manager_fini,
42360- nouveau_gart_manager_new,
42361- nouveau_gart_manager_del,
42362- nouveau_gart_manager_debug
42363+ .init = nouveau_gart_manager_init,
42364+ .takedown = nouveau_gart_manager_fini,
42365+ .get_node = nouveau_gart_manager_new,
42366+ .put_node = nouveau_gart_manager_del,
42367+ .debug = nouveau_gart_manager_debug
42368 };
42369
42370 /*XXX*/
42371@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42372 }
42373
42374 const struct ttm_mem_type_manager_func nv04_gart_manager = {
42375- nv04_gart_manager_init,
42376- nv04_gart_manager_fini,
42377- nv04_gart_manager_new,
42378- nv04_gart_manager_del,
42379- nv04_gart_manager_debug
42380+ .init = nv04_gart_manager_init,
42381+ .takedown = nv04_gart_manager_fini,
42382+ .get_node = nv04_gart_manager_new,
42383+ .put_node = nv04_gart_manager_del,
42384+ .debug = nv04_gart_manager_debug
42385 };
42386
42387 int
42388diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
42389index c7592ec..dd45ebc 100644
42390--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
42391+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
42392@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
42393 * locking inversion with the driver load path. And the access here is
42394 * completely racy anyway. So don't bother with locking for now.
42395 */
42396- return dev->open_count == 0;
42397+ return local_read(&dev->open_count) == 0;
42398 }
42399
42400 static const struct vga_switcheroo_client_ops
42401diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
42402index eb89653..613cf71 100644
42403--- a/drivers/gpu/drm/qxl/qxl_cmd.c
42404+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
42405@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
42406 int ret;
42407
42408 mutex_lock(&qdev->async_io_mutex);
42409- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42410+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42411 if (qdev->last_sent_io_cmd > irq_num) {
42412 if (intr)
42413 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42414- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42415+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42416 else
42417 ret = wait_event_timeout(qdev->io_cmd_event,
42418- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42419+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42420 /* 0 is timeout, just bail the "hw" has gone away */
42421 if (ret <= 0)
42422 goto out;
42423- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42424+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42425 }
42426 outb(val, addr);
42427 qdev->last_sent_io_cmd = irq_num + 1;
42428 if (intr)
42429 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42430- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42431+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42432 else
42433 ret = wait_event_timeout(qdev->io_cmd_event,
42434- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42435+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42436 out:
42437 if (ret > 0)
42438 ret = 0;
42439diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
42440index c3c2bbd..bc3c0fb 100644
42441--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
42442+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
42443@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
42444 struct drm_info_node *node = (struct drm_info_node *) m->private;
42445 struct qxl_device *qdev = node->minor->dev->dev_private;
42446
42447- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
42448- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
42449- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
42450- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
42451+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
42452+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
42453+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
42454+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
42455 seq_printf(m, "%d\n", qdev->irq_received_error);
42456 return 0;
42457 }
42458diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
42459index 36ed40b..0397633 100644
42460--- a/drivers/gpu/drm/qxl/qxl_drv.h
42461+++ b/drivers/gpu/drm/qxl/qxl_drv.h
42462@@ -290,10 +290,10 @@ struct qxl_device {
42463 unsigned int last_sent_io_cmd;
42464
42465 /* interrupt handling */
42466- atomic_t irq_received;
42467- atomic_t irq_received_display;
42468- atomic_t irq_received_cursor;
42469- atomic_t irq_received_io_cmd;
42470+ atomic_unchecked_t irq_received;
42471+ atomic_unchecked_t irq_received_display;
42472+ atomic_unchecked_t irq_received_cursor;
42473+ atomic_unchecked_t irq_received_io_cmd;
42474 unsigned irq_received_error;
42475 wait_queue_head_t display_event;
42476 wait_queue_head_t cursor_event;
42477diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
42478index b110883..dd06418 100644
42479--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
42480+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
42481@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42482
42483 /* TODO copy slow path code from i915 */
42484 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
42485- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
42486+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
42487
42488 {
42489 struct qxl_drawable *draw = fb_cmd;
42490@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42491 struct drm_qxl_reloc reloc;
42492
42493 if (copy_from_user(&reloc,
42494- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
42495+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
42496 sizeof(reloc))) {
42497 ret = -EFAULT;
42498 goto out_free_bos;
42499@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
42500
42501 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
42502
42503- struct drm_qxl_command *commands =
42504- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
42505+ struct drm_qxl_command __user *commands =
42506+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
42507
42508- if (copy_from_user(&user_cmd, &commands[cmd_num],
42509+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
42510 sizeof(user_cmd)))
42511 return -EFAULT;
42512
42513diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
42514index 0bf1e20..42a7310 100644
42515--- a/drivers/gpu/drm/qxl/qxl_irq.c
42516+++ b/drivers/gpu/drm/qxl/qxl_irq.c
42517@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
42518 if (!pending)
42519 return IRQ_NONE;
42520
42521- atomic_inc(&qdev->irq_received);
42522+ atomic_inc_unchecked(&qdev->irq_received);
42523
42524 if (pending & QXL_INTERRUPT_DISPLAY) {
42525- atomic_inc(&qdev->irq_received_display);
42526+ atomic_inc_unchecked(&qdev->irq_received_display);
42527 wake_up_all(&qdev->display_event);
42528 qxl_queue_garbage_collect(qdev, false);
42529 }
42530 if (pending & QXL_INTERRUPT_CURSOR) {
42531- atomic_inc(&qdev->irq_received_cursor);
42532+ atomic_inc_unchecked(&qdev->irq_received_cursor);
42533 wake_up_all(&qdev->cursor_event);
42534 }
42535 if (pending & QXL_INTERRUPT_IO_CMD) {
42536- atomic_inc(&qdev->irq_received_io_cmd);
42537+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
42538 wake_up_all(&qdev->io_cmd_event);
42539 }
42540 if (pending & QXL_INTERRUPT_ERROR) {
42541@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
42542 init_waitqueue_head(&qdev->io_cmd_event);
42543 INIT_WORK(&qdev->client_monitors_config_work,
42544 qxl_client_monitors_config_work_func);
42545- atomic_set(&qdev->irq_received, 0);
42546- atomic_set(&qdev->irq_received_display, 0);
42547- atomic_set(&qdev->irq_received_cursor, 0);
42548- atomic_set(&qdev->irq_received_io_cmd, 0);
42549+ atomic_set_unchecked(&qdev->irq_received, 0);
42550+ atomic_set_unchecked(&qdev->irq_received_display, 0);
42551+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
42552+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
42553 qdev->irq_received_error = 0;
42554 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
42555 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
42556diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
42557index 71a1bae..cb1f103 100644
42558--- a/drivers/gpu/drm/qxl/qxl_ttm.c
42559+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
42560@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
42561 }
42562 }
42563
42564-static struct vm_operations_struct qxl_ttm_vm_ops;
42565+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
42566 static const struct vm_operations_struct *ttm_vm_ops;
42567
42568 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42569@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
42570 return r;
42571 if (unlikely(ttm_vm_ops == NULL)) {
42572 ttm_vm_ops = vma->vm_ops;
42573+ pax_open_kernel();
42574 qxl_ttm_vm_ops = *ttm_vm_ops;
42575 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
42576+ pax_close_kernel();
42577 }
42578 vma->vm_ops = &qxl_ttm_vm_ops;
42579 return 0;
42580@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
42581 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
42582 {
42583 #if defined(CONFIG_DEBUG_FS)
42584- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
42585- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
42586- unsigned i;
42587+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
42588+ {
42589+ .name = "qxl_mem_mm",
42590+ .show = &qxl_mm_dump_table,
42591+ },
42592+ {
42593+ .name = "qxl_surf_mm",
42594+ .show = &qxl_mm_dump_table,
42595+ }
42596+ };
42597
42598- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
42599- if (i == 0)
42600- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
42601- else
42602- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
42603- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
42604- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
42605- qxl_mem_types_list[i].driver_features = 0;
42606- if (i == 0)
42607- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42608- else
42609- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42610+ pax_open_kernel();
42611+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42612+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42613+ pax_close_kernel();
42614
42615- }
42616- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
42617+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
42618 #else
42619 return 0;
42620 #endif
42621diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
42622index 59459fe..be26b31 100644
42623--- a/drivers/gpu/drm/r128/r128_cce.c
42624+++ b/drivers/gpu/drm/r128/r128_cce.c
42625@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
42626
42627 /* GH: Simple idle check.
42628 */
42629- atomic_set(&dev_priv->idle_count, 0);
42630+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42631
42632 /* We don't support anything other than bus-mastering ring mode,
42633 * but the ring can be in either AGP or PCI space for the ring
42634diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
42635index 5bf3f5f..7000661 100644
42636--- a/drivers/gpu/drm/r128/r128_drv.h
42637+++ b/drivers/gpu/drm/r128/r128_drv.h
42638@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
42639 int is_pci;
42640 unsigned long cce_buffers_offset;
42641
42642- atomic_t idle_count;
42643+ atomic_unchecked_t idle_count;
42644
42645 int page_flipping;
42646 int current_page;
42647 u32 crtc_offset;
42648 u32 crtc_offset_cntl;
42649
42650- atomic_t vbl_received;
42651+ atomic_unchecked_t vbl_received;
42652
42653 u32 color_fmt;
42654 unsigned int front_offset;
42655diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
42656index 663f38c..c689495 100644
42657--- a/drivers/gpu/drm/r128/r128_ioc32.c
42658+++ b/drivers/gpu/drm/r128/r128_ioc32.c
42659@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
42660 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
42661 }
42662
42663-drm_ioctl_compat_t *r128_compat_ioctls[] = {
42664+drm_ioctl_compat_t r128_compat_ioctls[] = {
42665 [DRM_R128_INIT] = compat_r128_init,
42666 [DRM_R128_DEPTH] = compat_r128_depth,
42667 [DRM_R128_STIPPLE] = compat_r128_stipple,
42668@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
42669 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42670 {
42671 unsigned int nr = DRM_IOCTL_NR(cmd);
42672- drm_ioctl_compat_t *fn = NULL;
42673 int ret;
42674
42675 if (nr < DRM_COMMAND_BASE)
42676 return drm_compat_ioctl(filp, cmd, arg);
42677
42678- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
42679- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42680-
42681- if (fn != NULL)
42682+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
42683+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42684 ret = (*fn) (filp, cmd, arg);
42685- else
42686+ } else
42687 ret = drm_ioctl(filp, cmd, arg);
42688
42689 return ret;
42690diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
42691index c2ae496..30b5993 100644
42692--- a/drivers/gpu/drm/r128/r128_irq.c
42693+++ b/drivers/gpu/drm/r128/r128_irq.c
42694@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
42695 if (crtc != 0)
42696 return 0;
42697
42698- return atomic_read(&dev_priv->vbl_received);
42699+ return atomic_read_unchecked(&dev_priv->vbl_received);
42700 }
42701
42702 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42703@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42704 /* VBLANK interrupt */
42705 if (status & R128_CRTC_VBLANK_INT) {
42706 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
42707- atomic_inc(&dev_priv->vbl_received);
42708+ atomic_inc_unchecked(&dev_priv->vbl_received);
42709 drm_handle_vblank(dev, 0);
42710 return IRQ_HANDLED;
42711 }
42712diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
42713index 575e986..66e62ca 100644
42714--- a/drivers/gpu/drm/r128/r128_state.c
42715+++ b/drivers/gpu/drm/r128/r128_state.c
42716@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
42717
42718 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
42719 {
42720- if (atomic_read(&dev_priv->idle_count) == 0)
42721+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
42722 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
42723 else
42724- atomic_set(&dev_priv->idle_count, 0);
42725+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42726 }
42727
42728 #endif
42729diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
42730index 4a85bb6..aaea819 100644
42731--- a/drivers/gpu/drm/radeon/mkregtable.c
42732+++ b/drivers/gpu/drm/radeon/mkregtable.c
42733@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
42734 regex_t mask_rex;
42735 regmatch_t match[4];
42736 char buf[1024];
42737- size_t end;
42738+ long end;
42739 int len;
42740 int done = 0;
42741 int r;
42742 unsigned o;
42743 struct offset *offset;
42744 char last_reg_s[10];
42745- int last_reg;
42746+ unsigned long last_reg;
42747
42748 if (regcomp
42749 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
42750diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42751index 12c8329..a69e2e8 100644
42752--- a/drivers/gpu/drm/radeon/radeon_device.c
42753+++ b/drivers/gpu/drm/radeon/radeon_device.c
42754@@ -1213,7 +1213,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42755 * locking inversion with the driver load path. And the access here is
42756 * completely racy anyway. So don't bother with locking for now.
42757 */
42758- return dev->open_count == 0;
42759+ return local_read(&dev->open_count) == 0;
42760 }
42761
42762 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42763diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42764index dafd812..1bf20c7 100644
42765--- a/drivers/gpu/drm/radeon/radeon_drv.h
42766+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42767@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42768
42769 /* SW interrupt */
42770 wait_queue_head_t swi_queue;
42771- atomic_t swi_emitted;
42772+ atomic_unchecked_t swi_emitted;
42773 int vblank_crtc;
42774 uint32_t irq_enable_reg;
42775 uint32_t r500_disp_irq_reg;
42776diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42777index 0b98ea1..0881827 100644
42778--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42779+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42780@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42781 request = compat_alloc_user_space(sizeof(*request));
42782 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42783 || __put_user(req32.param, &request->param)
42784- || __put_user((void __user *)(unsigned long)req32.value,
42785+ || __put_user((unsigned long)req32.value,
42786 &request->value))
42787 return -EFAULT;
42788
42789@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42790 #define compat_radeon_cp_setparam NULL
42791 #endif /* X86_64 || IA64 */
42792
42793-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42794+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42795 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42796 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42797 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42798@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42799 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42800 {
42801 unsigned int nr = DRM_IOCTL_NR(cmd);
42802- drm_ioctl_compat_t *fn = NULL;
42803 int ret;
42804
42805 if (nr < DRM_COMMAND_BASE)
42806 return drm_compat_ioctl(filp, cmd, arg);
42807
42808- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42809- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42810-
42811- if (fn != NULL)
42812+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42813+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42814 ret = (*fn) (filp, cmd, arg);
42815- else
42816+ } else
42817 ret = drm_ioctl(filp, cmd, arg);
42818
42819 return ret;
42820diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42821index 244b19b..c19226d 100644
42822--- a/drivers/gpu/drm/radeon/radeon_irq.c
42823+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42824@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42825 unsigned int ret;
42826 RING_LOCALS;
42827
42828- atomic_inc(&dev_priv->swi_emitted);
42829- ret = atomic_read(&dev_priv->swi_emitted);
42830+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42831+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42832
42833 BEGIN_RING(4);
42834 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42835@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42836 drm_radeon_private_t *dev_priv =
42837 (drm_radeon_private_t *) dev->dev_private;
42838
42839- atomic_set(&dev_priv->swi_emitted, 0);
42840+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42841 init_waitqueue_head(&dev_priv->swi_queue);
42842
42843 dev->max_vblank_count = 0x001fffff;
42844diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42845index 23bb64f..69d7234 100644
42846--- a/drivers/gpu/drm/radeon/radeon_state.c
42847+++ b/drivers/gpu/drm/radeon/radeon_state.c
42848@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42849 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42850 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42851
42852- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42853+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42854 sarea_priv->nbox * sizeof(depth_boxes[0])))
42855 return -EFAULT;
42856
42857@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42858 {
42859 drm_radeon_private_t *dev_priv = dev->dev_private;
42860 drm_radeon_getparam_t *param = data;
42861- int value;
42862+ int value = 0;
42863
42864 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42865
42866diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42867index 72afe82..056a57a 100644
42868--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42869+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42870@@ -801,7 +801,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42871 man->size = size >> PAGE_SHIFT;
42872 }
42873
42874-static struct vm_operations_struct radeon_ttm_vm_ops;
42875+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42876 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42877
42878 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42879@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42880 }
42881 if (unlikely(ttm_vm_ops == NULL)) {
42882 ttm_vm_ops = vma->vm_ops;
42883+ pax_open_kernel();
42884 radeon_ttm_vm_ops = *ttm_vm_ops;
42885 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42886+ pax_close_kernel();
42887 }
42888 vma->vm_ops = &radeon_ttm_vm_ops;
42889 return 0;
42890diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42891index 6553fd2..aecd29c 100644
42892--- a/drivers/gpu/drm/tegra/dc.c
42893+++ b/drivers/gpu/drm/tegra/dc.c
42894@@ -1243,7 +1243,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42895 }
42896
42897 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42898- dc->debugfs_files[i].data = dc;
42899+ *(void **)&dc->debugfs_files[i].data = dc;
42900
42901 err = drm_debugfs_create_files(dc->debugfs_files,
42902 ARRAY_SIZE(debugfs_files),
42903diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42904index f787445..2df2c65 100644
42905--- a/drivers/gpu/drm/tegra/dsi.c
42906+++ b/drivers/gpu/drm/tegra/dsi.c
42907@@ -41,7 +41,7 @@ struct tegra_dsi {
42908 struct clk *clk_lp;
42909 struct clk *clk;
42910
42911- struct drm_info_list *debugfs_files;
42912+ drm_info_list_no_const *debugfs_files;
42913 struct drm_minor *minor;
42914 struct dentry *debugfs;
42915
42916diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42917index ffe2654..03c7b1c 100644
42918--- a/drivers/gpu/drm/tegra/hdmi.c
42919+++ b/drivers/gpu/drm/tegra/hdmi.c
42920@@ -60,7 +60,7 @@ struct tegra_hdmi {
42921 bool stereo;
42922 bool dvi;
42923
42924- struct drm_info_list *debugfs_files;
42925+ drm_info_list_no_const *debugfs_files;
42926 struct drm_minor *minor;
42927 struct dentry *debugfs;
42928 };
42929diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42930index 9e103a48..0e117f3 100644
42931--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42932+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42933@@ -147,10 +147,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42934 }
42935
42936 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42937- ttm_bo_man_init,
42938- ttm_bo_man_takedown,
42939- ttm_bo_man_get_node,
42940- ttm_bo_man_put_node,
42941- ttm_bo_man_debug
42942+ .init = ttm_bo_man_init,
42943+ .takedown = ttm_bo_man_takedown,
42944+ .get_node = ttm_bo_man_get_node,
42945+ .put_node = ttm_bo_man_put_node,
42946+ .debug = ttm_bo_man_debug
42947 };
42948 EXPORT_SYMBOL(ttm_bo_manager_func);
42949diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42950index dbc2def..0a9f710 100644
42951--- a/drivers/gpu/drm/ttm/ttm_memory.c
42952+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42953@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42954 zone->glob = glob;
42955 glob->zone_kernel = zone;
42956 ret = kobject_init_and_add(
42957- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42958+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42959 if (unlikely(ret != 0)) {
42960 kobject_put(&zone->kobj);
42961 return ret;
42962@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42963 zone->glob = glob;
42964 glob->zone_dma32 = zone;
42965 ret = kobject_init_and_add(
42966- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42967+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42968 if (unlikely(ret != 0)) {
42969 kobject_put(&zone->kobj);
42970 return ret;
42971diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42972index d1da339..829235e 100644
42973--- a/drivers/gpu/drm/udl/udl_fb.c
42974+++ b/drivers/gpu/drm/udl/udl_fb.c
42975@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42976 fb_deferred_io_cleanup(info);
42977 kfree(info->fbdefio);
42978 info->fbdefio = NULL;
42979- info->fbops->fb_mmap = udl_fb_mmap;
42980 }
42981
42982 pr_warn("released /dev/fb%d user=%d count=%d\n",
42983diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42984index ad02732..144f5ed 100644
42985--- a/drivers/gpu/drm/via/via_drv.h
42986+++ b/drivers/gpu/drm/via/via_drv.h
42987@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
42988 typedef uint32_t maskarray_t[5];
42989
42990 typedef struct drm_via_irq {
42991- atomic_t irq_received;
42992+ atomic_unchecked_t irq_received;
42993 uint32_t pending_mask;
42994 uint32_t enable_mask;
42995 wait_queue_head_t irq_queue;
42996@@ -75,7 +75,7 @@ typedef struct drm_via_private {
42997 struct timeval last_vblank;
42998 int last_vblank_valid;
42999 unsigned usec_per_vblank;
43000- atomic_t vbl_received;
43001+ atomic_unchecked_t vbl_received;
43002 drm_via_state_t hc_state;
43003 char pci_buf[VIA_PCI_BUF_SIZE];
43004 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
43005diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
43006index 1319433..a993b0c 100644
43007--- a/drivers/gpu/drm/via/via_irq.c
43008+++ b/drivers/gpu/drm/via/via_irq.c
43009@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
43010 if (crtc != 0)
43011 return 0;
43012
43013- return atomic_read(&dev_priv->vbl_received);
43014+ return atomic_read_unchecked(&dev_priv->vbl_received);
43015 }
43016
43017 irqreturn_t via_driver_irq_handler(int irq, void *arg)
43018@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43019
43020 status = VIA_READ(VIA_REG_INTERRUPT);
43021 if (status & VIA_IRQ_VBLANK_PENDING) {
43022- atomic_inc(&dev_priv->vbl_received);
43023- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
43024+ atomic_inc_unchecked(&dev_priv->vbl_received);
43025+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
43026 do_gettimeofday(&cur_vblank);
43027 if (dev_priv->last_vblank_valid) {
43028 dev_priv->usec_per_vblank =
43029@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43030 dev_priv->last_vblank = cur_vblank;
43031 dev_priv->last_vblank_valid = 1;
43032 }
43033- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
43034+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
43035 DRM_DEBUG("US per vblank is: %u\n",
43036 dev_priv->usec_per_vblank);
43037 }
43038@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43039
43040 for (i = 0; i < dev_priv->num_irqs; ++i) {
43041 if (status & cur_irq->pending_mask) {
43042- atomic_inc(&cur_irq->irq_received);
43043+ atomic_inc_unchecked(&cur_irq->irq_received);
43044 wake_up(&cur_irq->irq_queue);
43045 handled = 1;
43046 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
43047@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
43048 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43049 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
43050 masks[irq][4]));
43051- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
43052+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
43053 } else {
43054 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43055 (((cur_irq_sequence =
43056- atomic_read(&cur_irq->irq_received)) -
43057+ atomic_read_unchecked(&cur_irq->irq_received)) -
43058 *sequence) <= (1 << 23)));
43059 }
43060 *sequence = cur_irq_sequence;
43061@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
43062 }
43063
43064 for (i = 0; i < dev_priv->num_irqs; ++i) {
43065- atomic_set(&cur_irq->irq_received, 0);
43066+ atomic_set_unchecked(&cur_irq->irq_received, 0);
43067 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
43068 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
43069 init_waitqueue_head(&cur_irq->irq_queue);
43070@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
43071 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
43072 case VIA_IRQ_RELATIVE:
43073 irqwait->request.sequence +=
43074- atomic_read(&cur_irq->irq_received);
43075+ atomic_read_unchecked(&cur_irq->irq_received);
43076 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
43077 case VIA_IRQ_ABSOLUTE:
43078 break;
43079diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43080index 99f7317..33a835b 100644
43081--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43082+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43083@@ -447,7 +447,7 @@ struct vmw_private {
43084 * Fencing and IRQs.
43085 */
43086
43087- atomic_t marker_seq;
43088+ atomic_unchecked_t marker_seq;
43089 wait_queue_head_t fence_queue;
43090 wait_queue_head_t fifo_queue;
43091 int fence_queue_waiters; /* Protected by hw_mutex */
43092diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43093index 6eae14d..aa311b3 100644
43094--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43095+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43096@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43097 (unsigned int) min,
43098 (unsigned int) fifo->capabilities);
43099
43100- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43101+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43102 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43103 vmw_marker_queue_init(&fifo->marker_queue);
43104 return vmw_fifo_send_fence(dev_priv, &dummy);
43105@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43106 if (reserveable)
43107 iowrite32(bytes, fifo_mem +
43108 SVGA_FIFO_RESERVED);
43109- return fifo_mem + (next_cmd >> 2);
43110+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43111 } else {
43112 need_bounce = true;
43113 }
43114@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43115
43116 fm = vmw_fifo_reserve(dev_priv, bytes);
43117 if (unlikely(fm == NULL)) {
43118- *seqno = atomic_read(&dev_priv->marker_seq);
43119+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43120 ret = -ENOMEM;
43121 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43122 false, 3*HZ);
43123@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43124 }
43125
43126 do {
43127- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43128+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43129 } while (*seqno == 0);
43130
43131 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43132diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43133index 26f8bdd..90a0008 100644
43134--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43135+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43136@@ -165,9 +165,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43137 }
43138
43139 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43140- vmw_gmrid_man_init,
43141- vmw_gmrid_man_takedown,
43142- vmw_gmrid_man_get_node,
43143- vmw_gmrid_man_put_node,
43144- vmw_gmrid_man_debug
43145+ .init = vmw_gmrid_man_init,
43146+ .takedown = vmw_gmrid_man_takedown,
43147+ .get_node = vmw_gmrid_man_get_node,
43148+ .put_node = vmw_gmrid_man_put_node,
43149+ .debug = vmw_gmrid_man_debug
43150 };
43151diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43152index 37881ec..319065d 100644
43153--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43154+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43155@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43156 int ret;
43157
43158 num_clips = arg->num_clips;
43159- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43160+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43161
43162 if (unlikely(num_clips == 0))
43163 return 0;
43164@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43165 int ret;
43166
43167 num_clips = arg->num_clips;
43168- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43169+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43170
43171 if (unlikely(num_clips == 0))
43172 return 0;
43173diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43174index 0c42376..6febe77 100644
43175--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43176+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43177@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43178 * emitted. Then the fence is stale and signaled.
43179 */
43180
43181- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43182+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43183 > VMW_FENCE_WRAP);
43184
43185 return ret;
43186@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43187
43188 if (fifo_idle)
43189 down_read(&fifo_state->rwsem);
43190- signal_seq = atomic_read(&dev_priv->marker_seq);
43191+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43192 ret = 0;
43193
43194 for (;;) {
43195diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43196index efd1ffd..0ae13ca 100644
43197--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43198+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43199@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43200 while (!vmw_lag_lt(queue, us)) {
43201 spin_lock(&queue->lock);
43202 if (list_empty(&queue->head))
43203- seqno = atomic_read(&dev_priv->marker_seq);
43204+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43205 else {
43206 marker = list_first_entry(&queue->head,
43207 struct vmw_marker, head);
43208diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43209index 37ac7b5..d52a5c9 100644
43210--- a/drivers/gpu/vga/vga_switcheroo.c
43211+++ b/drivers/gpu/vga/vga_switcheroo.c
43212@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43213
43214 /* this version is for the case where the power switch is separate
43215 to the device being powered down. */
43216-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43217+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43218 {
43219 /* copy over all the bus versions */
43220 if (dev->bus && dev->bus->pm) {
43221@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43222 return ret;
43223 }
43224
43225-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43226+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43227 {
43228 /* copy over all the bus versions */
43229 if (dev->bus && dev->bus->pm) {
43230diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43231index 12b6e67..ddd983c 100644
43232--- a/drivers/hid/hid-core.c
43233+++ b/drivers/hid/hid-core.c
43234@@ -2500,7 +2500,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43235
43236 int hid_add_device(struct hid_device *hdev)
43237 {
43238- static atomic_t id = ATOMIC_INIT(0);
43239+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43240 int ret;
43241
43242 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43243@@ -2542,7 +2542,7 @@ int hid_add_device(struct hid_device *hdev)
43244 /* XXX hack, any other cleaner solution after the driver core
43245 * is converted to allow more than 20 bytes as the device name? */
43246 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43247- hdev->vendor, hdev->product, atomic_inc_return(&id));
43248+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43249
43250 hid_debug_register(hdev, dev_name(&hdev->dev));
43251 ret = device_add(&hdev->dev);
43252diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
43253index 9bf8637..f462416 100644
43254--- a/drivers/hid/hid-logitech-dj.c
43255+++ b/drivers/hid/hid-logitech-dj.c
43256@@ -682,6 +682,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
43257 * device (via hid_input_report() ) and return 1 so hid-core does not do
43258 * anything else with it.
43259 */
43260+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
43261+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
43262+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
43263+ __func__, dj_report->device_index);
43264+ return false;
43265+ }
43266
43267 /* case 1) */
43268 if (data[0] != REPORT_ID_DJ_SHORT)
43269diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43270index c13fb5b..55a3802 100644
43271--- a/drivers/hid/hid-wiimote-debug.c
43272+++ b/drivers/hid/hid-wiimote-debug.c
43273@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43274 else if (size == 0)
43275 return -EIO;
43276
43277- if (copy_to_user(u, buf, size))
43278+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43279 return -EFAULT;
43280
43281 *off += size;
43282diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43283index 0cb92e3..c7d453d 100644
43284--- a/drivers/hid/uhid.c
43285+++ b/drivers/hid/uhid.c
43286@@ -47,7 +47,7 @@ struct uhid_device {
43287 struct mutex report_lock;
43288 wait_queue_head_t report_wait;
43289 atomic_t report_done;
43290- atomic_t report_id;
43291+ atomic_unchecked_t report_id;
43292 struct uhid_event report_buf;
43293 };
43294
43295@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43296
43297 spin_lock_irqsave(&uhid->qlock, flags);
43298 ev->type = UHID_FEATURE;
43299- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43300+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43301 ev->u.feature.rnum = rnum;
43302 ev->u.feature.rtype = report_type;
43303
43304@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43305 spin_lock_irqsave(&uhid->qlock, flags);
43306
43307 /* id for old report; drop it silently */
43308- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43309+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43310 goto unlock;
43311 if (atomic_read(&uhid->report_done))
43312 goto unlock;
43313diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43314index 19bad59..ca24eaf 100644
43315--- a/drivers/hv/channel.c
43316+++ b/drivers/hv/channel.c
43317@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43318 unsigned long flags;
43319 int ret = 0;
43320
43321- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43322- atomic_inc(&vmbus_connection.next_gpadl_handle);
43323+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43324+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43325
43326 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43327 if (ret)
43328diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43329index 3e4235c..877d0e5 100644
43330--- a/drivers/hv/hv.c
43331+++ b/drivers/hv/hv.c
43332@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43333 u64 output_address = (output) ? virt_to_phys(output) : 0;
43334 u32 output_address_hi = output_address >> 32;
43335 u32 output_address_lo = output_address & 0xFFFFFFFF;
43336- void *hypercall_page = hv_context.hypercall_page;
43337+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43338
43339 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43340 "=a"(hv_status_lo) : "d" (control_hi),
43341@@ -156,7 +156,7 @@ int hv_init(void)
43342 /* See if the hypercall page is already set */
43343 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43344
43345- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43346+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43347
43348 if (!virtaddr)
43349 goto cleanup;
43350diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43351index 5e90c5d..d8fcefb 100644
43352--- a/drivers/hv/hv_balloon.c
43353+++ b/drivers/hv/hv_balloon.c
43354@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43355
43356 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43357 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43358-static atomic_t trans_id = ATOMIC_INIT(0);
43359+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43360
43361 static int dm_ring_size = (5 * PAGE_SIZE);
43362
43363@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43364 pr_info("Memory hot add failed\n");
43365
43366 dm->state = DM_INITIALIZED;
43367- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43368+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43369 vmbus_sendpacket(dm->dev->channel, &resp,
43370 sizeof(struct dm_hot_add_response),
43371 (unsigned long)NULL,
43372@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43373 memset(&status, 0, sizeof(struct dm_status));
43374 status.hdr.type = DM_STATUS_REPORT;
43375 status.hdr.size = sizeof(struct dm_status);
43376- status.hdr.trans_id = atomic_inc_return(&trans_id);
43377+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43378
43379 /*
43380 * The host expects the guest to report free memory.
43381@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43382 * send the status. This can happen if we were interrupted
43383 * after we picked our transaction ID.
43384 */
43385- if (status.hdr.trans_id != atomic_read(&trans_id))
43386+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43387 return;
43388
43389 /*
43390@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43391 */
43392
43393 do {
43394- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43395+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43396 ret = vmbus_sendpacket(dm_device.dev->channel,
43397 bl_resp,
43398 bl_resp->hdr.size,
43399@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43400
43401 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43402 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43403- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43404+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43405 resp.hdr.size = sizeof(struct dm_unballoon_response);
43406
43407 vmbus_sendpacket(dm_device.dev->channel, &resp,
43408@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43409 memset(&version_req, 0, sizeof(struct dm_version_request));
43410 version_req.hdr.type = DM_VERSION_REQUEST;
43411 version_req.hdr.size = sizeof(struct dm_version_request);
43412- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43413+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43414 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43415 version_req.is_last_attempt = 1;
43416
43417@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43418 memset(&version_req, 0, sizeof(struct dm_version_request));
43419 version_req.hdr.type = DM_VERSION_REQUEST;
43420 version_req.hdr.size = sizeof(struct dm_version_request);
43421- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43422+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43423 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43424 version_req.is_last_attempt = 0;
43425
43426@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43427 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43428 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43429 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43430- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43431+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43432
43433 cap_msg.caps.cap_bits.balloon = 1;
43434 cap_msg.caps.cap_bits.hot_add = 1;
43435diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43436index c386d8d..d6004c4 100644
43437--- a/drivers/hv/hyperv_vmbus.h
43438+++ b/drivers/hv/hyperv_vmbus.h
43439@@ -611,7 +611,7 @@ enum vmbus_connect_state {
43440 struct vmbus_connection {
43441 enum vmbus_connect_state conn_state;
43442
43443- atomic_t next_gpadl_handle;
43444+ atomic_unchecked_t next_gpadl_handle;
43445
43446 /*
43447 * Represents channel interrupts. Each bit position represents a
43448diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43449index 4d6b269..2e23b86 100644
43450--- a/drivers/hv/vmbus_drv.c
43451+++ b/drivers/hv/vmbus_drv.c
43452@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43453 {
43454 int ret = 0;
43455
43456- static atomic_t device_num = ATOMIC_INIT(0);
43457+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43458
43459 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43460- atomic_inc_return(&device_num));
43461+ atomic_inc_return_unchecked(&device_num));
43462
43463 child_device_obj->device.bus = &hv_bus;
43464 child_device_obj->device.parent = &hv_acpi_dev->dev;
43465diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43466index 579bdf9..75118b5 100644
43467--- a/drivers/hwmon/acpi_power_meter.c
43468+++ b/drivers/hwmon/acpi_power_meter.c
43469@@ -116,7 +116,7 @@ struct sensor_template {
43470 struct device_attribute *devattr,
43471 const char *buf, size_t count);
43472 int index;
43473-};
43474+} __do_const;
43475
43476 /* Averaging interval */
43477 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43478@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43479 struct sensor_template *attrs)
43480 {
43481 struct device *dev = &resource->acpi_dev->dev;
43482- struct sensor_device_attribute *sensors =
43483+ sensor_device_attribute_no_const *sensors =
43484 &resource->sensors[resource->num_sensors];
43485 int res = 0;
43486
43487diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43488index 3288f13..71cfb4e 100644
43489--- a/drivers/hwmon/applesmc.c
43490+++ b/drivers/hwmon/applesmc.c
43491@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43492 {
43493 struct applesmc_node_group *grp;
43494 struct applesmc_dev_attr *node;
43495- struct attribute *attr;
43496+ attribute_no_const *attr;
43497 int ret, i;
43498
43499 for (grp = groups; grp->format; grp++) {
43500diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43501index cccef87..06ce8ec 100644
43502--- a/drivers/hwmon/asus_atk0110.c
43503+++ b/drivers/hwmon/asus_atk0110.c
43504@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43505 struct atk_sensor_data {
43506 struct list_head list;
43507 struct atk_data *data;
43508- struct device_attribute label_attr;
43509- struct device_attribute input_attr;
43510- struct device_attribute limit1_attr;
43511- struct device_attribute limit2_attr;
43512+ device_attribute_no_const label_attr;
43513+ device_attribute_no_const input_attr;
43514+ device_attribute_no_const limit1_attr;
43515+ device_attribute_no_const limit2_attr;
43516 char label_attr_name[ATTR_NAME_SIZE];
43517 char input_attr_name[ATTR_NAME_SIZE];
43518 char limit1_attr_name[ATTR_NAME_SIZE];
43519@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43520 static struct device_attribute atk_name_attr =
43521 __ATTR(name, 0444, atk_name_show, NULL);
43522
43523-static void atk_init_attribute(struct device_attribute *attr, char *name,
43524+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43525 sysfs_show_func show)
43526 {
43527 sysfs_attr_init(&attr->attr);
43528diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43529index d76f0b7..55ae976 100644
43530--- a/drivers/hwmon/coretemp.c
43531+++ b/drivers/hwmon/coretemp.c
43532@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43533 return NOTIFY_OK;
43534 }
43535
43536-static struct notifier_block coretemp_cpu_notifier __refdata = {
43537+static struct notifier_block coretemp_cpu_notifier = {
43538 .notifier_call = coretemp_cpu_callback,
43539 };
43540
43541diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43542index 7a8a6fb..015c1fd 100644
43543--- a/drivers/hwmon/ibmaem.c
43544+++ b/drivers/hwmon/ibmaem.c
43545@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
43546 struct aem_rw_sensor_template *rw)
43547 {
43548 struct device *dev = &data->pdev->dev;
43549- struct sensor_device_attribute *sensors = data->sensors;
43550+ sensor_device_attribute_no_const *sensors = data->sensors;
43551 int err;
43552
43553 /* Set up read-only sensors */
43554diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43555index 14c82da..09b25d7 100644
43556--- a/drivers/hwmon/iio_hwmon.c
43557+++ b/drivers/hwmon/iio_hwmon.c
43558@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43559 {
43560 struct device *dev = &pdev->dev;
43561 struct iio_hwmon_state *st;
43562- struct sensor_device_attribute *a;
43563+ sensor_device_attribute_no_const *a;
43564 int ret, i;
43565 int in_i = 1, temp_i = 1, curr_i = 1;
43566 enum iio_chan_type type;
43567diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43568index 7710f46..427a28d 100644
43569--- a/drivers/hwmon/nct6683.c
43570+++ b/drivers/hwmon/nct6683.c
43571@@ -397,11 +397,11 @@ static struct attribute_group *
43572 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43573 int repeat)
43574 {
43575- struct sensor_device_attribute_2 *a2;
43576- struct sensor_device_attribute *a;
43577+ sensor_device_attribute_2_no_const *a2;
43578+ sensor_device_attribute_no_const *a;
43579 struct sensor_device_template **t;
43580 struct sensor_device_attr_u *su;
43581- struct attribute_group *group;
43582+ attribute_group_no_const *group;
43583 struct attribute **attrs;
43584 int i, j, count;
43585
43586diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43587index 504cbdd..35d6f25 100644
43588--- a/drivers/hwmon/nct6775.c
43589+++ b/drivers/hwmon/nct6775.c
43590@@ -943,10 +943,10 @@ static struct attribute_group *
43591 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43592 int repeat)
43593 {
43594- struct attribute_group *group;
43595+ attribute_group_no_const *group;
43596 struct sensor_device_attr_u *su;
43597- struct sensor_device_attribute *a;
43598- struct sensor_device_attribute_2 *a2;
43599+ sensor_device_attribute_no_const *a;
43600+ sensor_device_attribute_2_no_const *a2;
43601 struct attribute **attrs;
43602 struct sensor_device_template **t;
43603 int i, count;
43604diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43605index 291d11f..3f0dbbd 100644
43606--- a/drivers/hwmon/pmbus/pmbus_core.c
43607+++ b/drivers/hwmon/pmbus/pmbus_core.c
43608@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43609 return 0;
43610 }
43611
43612-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43613+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43614 const char *name,
43615 umode_t mode,
43616 ssize_t (*show)(struct device *dev,
43617@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43618 dev_attr->store = store;
43619 }
43620
43621-static void pmbus_attr_init(struct sensor_device_attribute *a,
43622+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43623 const char *name,
43624 umode_t mode,
43625 ssize_t (*show)(struct device *dev,
43626@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43627 u16 reg, u8 mask)
43628 {
43629 struct pmbus_boolean *boolean;
43630- struct sensor_device_attribute *a;
43631+ sensor_device_attribute_no_const *a;
43632
43633 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43634 if (!boolean)
43635@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43636 bool update, bool readonly)
43637 {
43638 struct pmbus_sensor *sensor;
43639- struct device_attribute *a;
43640+ device_attribute_no_const *a;
43641
43642 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43643 if (!sensor)
43644@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43645 const char *lstring, int index)
43646 {
43647 struct pmbus_label *label;
43648- struct device_attribute *a;
43649+ device_attribute_no_const *a;
43650
43651 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43652 if (!label)
43653diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43654index 97cd45a..ac54d8b 100644
43655--- a/drivers/hwmon/sht15.c
43656+++ b/drivers/hwmon/sht15.c
43657@@ -169,7 +169,7 @@ struct sht15_data {
43658 int supply_uv;
43659 bool supply_uv_valid;
43660 struct work_struct update_supply_work;
43661- atomic_t interrupt_handled;
43662+ atomic_unchecked_t interrupt_handled;
43663 };
43664
43665 /**
43666@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43667 ret = gpio_direction_input(data->pdata->gpio_data);
43668 if (ret)
43669 return ret;
43670- atomic_set(&data->interrupt_handled, 0);
43671+ atomic_set_unchecked(&data->interrupt_handled, 0);
43672
43673 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43674 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43675 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43676 /* Only relevant if the interrupt hasn't occurred. */
43677- if (!atomic_read(&data->interrupt_handled))
43678+ if (!atomic_read_unchecked(&data->interrupt_handled))
43679 schedule_work(&data->read_work);
43680 }
43681 ret = wait_event_timeout(data->wait_queue,
43682@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43683
43684 /* First disable the interrupt */
43685 disable_irq_nosync(irq);
43686- atomic_inc(&data->interrupt_handled);
43687+ atomic_inc_unchecked(&data->interrupt_handled);
43688 /* Then schedule a reading work struct */
43689 if (data->state != SHT15_READING_NOTHING)
43690 schedule_work(&data->read_work);
43691@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43692 * If not, then start the interrupt again - care here as could
43693 * have gone low in meantime so verify it hasn't!
43694 */
43695- atomic_set(&data->interrupt_handled, 0);
43696+ atomic_set_unchecked(&data->interrupt_handled, 0);
43697 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43698 /* If still not occurred or another handler was scheduled */
43699 if (gpio_get_value(data->pdata->gpio_data)
43700- || atomic_read(&data->interrupt_handled))
43701+ || atomic_read_unchecked(&data->interrupt_handled))
43702 return;
43703 }
43704
43705diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43706index 8df43c5..b07b91d 100644
43707--- a/drivers/hwmon/via-cputemp.c
43708+++ b/drivers/hwmon/via-cputemp.c
43709@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43710 return NOTIFY_OK;
43711 }
43712
43713-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43714+static struct notifier_block via_cputemp_cpu_notifier = {
43715 .notifier_call = via_cputemp_cpu_callback,
43716 };
43717
43718diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43719index 41fc683..a39cfea 100644
43720--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43721+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43722@@ -43,7 +43,7 @@
43723 extern struct i2c_adapter amd756_smbus;
43724
43725 static struct i2c_adapter *s4882_adapter;
43726-static struct i2c_algorithm *s4882_algo;
43727+static i2c_algorithm_no_const *s4882_algo;
43728
43729 /* Wrapper access functions for multiplexed SMBus */
43730 static DEFINE_MUTEX(amd756_lock);
43731diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43732index b19a310..d6eece0 100644
43733--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43734+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43735@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43736 /* usb layer */
43737
43738 /* Send command to device, and get response. */
43739-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43740+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43741 {
43742 int ret = 0;
43743 int actual;
43744diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43745index b170bdf..3c76427 100644
43746--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43747+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43748@@ -41,7 +41,7 @@
43749 extern struct i2c_adapter *nforce2_smbus;
43750
43751 static struct i2c_adapter *s4985_adapter;
43752-static struct i2c_algorithm *s4985_algo;
43753+static i2c_algorithm_no_const *s4985_algo;
43754
43755 /* Wrapper access functions for multiplexed SMBus */
43756 static DEFINE_MUTEX(nforce2_lock);
43757diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43758index 80b47e8..1a6040d9 100644
43759--- a/drivers/i2c/i2c-dev.c
43760+++ b/drivers/i2c/i2c-dev.c
43761@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43762 break;
43763 }
43764
43765- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43766+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43767 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43768 if (IS_ERR(rdwr_pa[i].buf)) {
43769 res = PTR_ERR(rdwr_pa[i].buf);
43770diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43771index 0b510ba..4fbb5085 100644
43772--- a/drivers/ide/ide-cd.c
43773+++ b/drivers/ide/ide-cd.c
43774@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43775 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43776 if ((unsigned long)buf & alignment
43777 || blk_rq_bytes(rq) & q->dma_pad_mask
43778- || object_is_on_stack(buf))
43779+ || object_starts_on_stack(buf))
43780 drive->dma = 0;
43781 }
43782 }
43783diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43784index af3e76d..96dfe5e 100644
43785--- a/drivers/iio/industrialio-core.c
43786+++ b/drivers/iio/industrialio-core.c
43787@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43788 }
43789
43790 static
43791-int __iio_device_attr_init(struct device_attribute *dev_attr,
43792+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43793 const char *postfix,
43794 struct iio_chan_spec const *chan,
43795 ssize_t (*readfunc)(struct device *dev,
43796diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43797index e28a494..f7c2671 100644
43798--- a/drivers/infiniband/core/cm.c
43799+++ b/drivers/infiniband/core/cm.c
43800@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43801
43802 struct cm_counter_group {
43803 struct kobject obj;
43804- atomic_long_t counter[CM_ATTR_COUNT];
43805+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43806 };
43807
43808 struct cm_counter_attribute {
43809@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43810 struct ib_mad_send_buf *msg = NULL;
43811 int ret;
43812
43813- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43814+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43815 counter[CM_REQ_COUNTER]);
43816
43817 /* Quick state check to discard duplicate REQs. */
43818@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43819 if (!cm_id_priv)
43820 return;
43821
43822- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43823+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43824 counter[CM_REP_COUNTER]);
43825 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43826 if (ret)
43827@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43828 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43829 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43830 spin_unlock_irq(&cm_id_priv->lock);
43831- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43832+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43833 counter[CM_RTU_COUNTER]);
43834 goto out;
43835 }
43836@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43837 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43838 dreq_msg->local_comm_id);
43839 if (!cm_id_priv) {
43840- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43841+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43842 counter[CM_DREQ_COUNTER]);
43843 cm_issue_drep(work->port, work->mad_recv_wc);
43844 return -EINVAL;
43845@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43846 case IB_CM_MRA_REP_RCVD:
43847 break;
43848 case IB_CM_TIMEWAIT:
43849- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43850+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43851 counter[CM_DREQ_COUNTER]);
43852 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43853 goto unlock;
43854@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43855 cm_free_msg(msg);
43856 goto deref;
43857 case IB_CM_DREQ_RCVD:
43858- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43859+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43860 counter[CM_DREQ_COUNTER]);
43861 goto unlock;
43862 default:
43863@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43864 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43865 cm_id_priv->msg, timeout)) {
43866 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43867- atomic_long_inc(&work->port->
43868+ atomic_long_inc_unchecked(&work->port->
43869 counter_group[CM_RECV_DUPLICATES].
43870 counter[CM_MRA_COUNTER]);
43871 goto out;
43872@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43873 break;
43874 case IB_CM_MRA_REQ_RCVD:
43875 case IB_CM_MRA_REP_RCVD:
43876- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43877+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43878 counter[CM_MRA_COUNTER]);
43879 /* fall through */
43880 default:
43881@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43882 case IB_CM_LAP_IDLE:
43883 break;
43884 case IB_CM_MRA_LAP_SENT:
43885- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43886+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43887 counter[CM_LAP_COUNTER]);
43888 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43889 goto unlock;
43890@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43891 cm_free_msg(msg);
43892 goto deref;
43893 case IB_CM_LAP_RCVD:
43894- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43895+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43896 counter[CM_LAP_COUNTER]);
43897 goto unlock;
43898 default:
43899@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43900 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43901 if (cur_cm_id_priv) {
43902 spin_unlock_irq(&cm.lock);
43903- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43904+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43905 counter[CM_SIDR_REQ_COUNTER]);
43906 goto out; /* Duplicate message. */
43907 }
43908@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43909 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43910 msg->retries = 1;
43911
43912- atomic_long_add(1 + msg->retries,
43913+ atomic_long_add_unchecked(1 + msg->retries,
43914 &port->counter_group[CM_XMIT].counter[attr_index]);
43915 if (msg->retries)
43916- atomic_long_add(msg->retries,
43917+ atomic_long_add_unchecked(msg->retries,
43918 &port->counter_group[CM_XMIT_RETRIES].
43919 counter[attr_index]);
43920
43921@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43922 }
43923
43924 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43925- atomic_long_inc(&port->counter_group[CM_RECV].
43926+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43927 counter[attr_id - CM_ATTR_ID_OFFSET]);
43928
43929 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43930@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43931 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43932
43933 return sprintf(buf, "%ld\n",
43934- atomic_long_read(&group->counter[cm_attr->index]));
43935+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43936 }
43937
43938 static const struct sysfs_ops cm_counter_ops = {
43939diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43940index 9f5ad7c..588cd84 100644
43941--- a/drivers/infiniband/core/fmr_pool.c
43942+++ b/drivers/infiniband/core/fmr_pool.c
43943@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43944
43945 struct task_struct *thread;
43946
43947- atomic_t req_ser;
43948- atomic_t flush_ser;
43949+ atomic_unchecked_t req_ser;
43950+ atomic_unchecked_t flush_ser;
43951
43952 wait_queue_head_t force_wait;
43953 };
43954@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43955 struct ib_fmr_pool *pool = pool_ptr;
43956
43957 do {
43958- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43959+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43960 ib_fmr_batch_release(pool);
43961
43962- atomic_inc(&pool->flush_ser);
43963+ atomic_inc_unchecked(&pool->flush_ser);
43964 wake_up_interruptible(&pool->force_wait);
43965
43966 if (pool->flush_function)
43967@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43968 }
43969
43970 set_current_state(TASK_INTERRUPTIBLE);
43971- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43972+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43973 !kthread_should_stop())
43974 schedule();
43975 __set_current_state(TASK_RUNNING);
43976@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43977 pool->dirty_watermark = params->dirty_watermark;
43978 pool->dirty_len = 0;
43979 spin_lock_init(&pool->pool_lock);
43980- atomic_set(&pool->req_ser, 0);
43981- atomic_set(&pool->flush_ser, 0);
43982+ atomic_set_unchecked(&pool->req_ser, 0);
43983+ atomic_set_unchecked(&pool->flush_ser, 0);
43984 init_waitqueue_head(&pool->force_wait);
43985
43986 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43987@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43988 }
43989 spin_unlock_irq(&pool->pool_lock);
43990
43991- serial = atomic_inc_return(&pool->req_ser);
43992+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43993 wake_up_process(pool->thread);
43994
43995 if (wait_event_interruptible(pool->force_wait,
43996- atomic_read(&pool->flush_ser) - serial >= 0))
43997+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43998 return -EINTR;
43999
44000 return 0;
44001@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44002 } else {
44003 list_add_tail(&fmr->list, &pool->dirty_list);
44004 if (++pool->dirty_len >= pool->dirty_watermark) {
44005- atomic_inc(&pool->req_ser);
44006+ atomic_inc_unchecked(&pool->req_ser);
44007 wake_up_process(pool->thread);
44008 }
44009 }
44010diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44011index ec7a298..8742e59 100644
44012--- a/drivers/infiniband/hw/cxgb4/mem.c
44013+++ b/drivers/infiniband/hw/cxgb4/mem.c
44014@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44015 int err;
44016 struct fw_ri_tpte tpt;
44017 u32 stag_idx;
44018- static atomic_t key;
44019+ static atomic_unchecked_t key;
44020
44021 if (c4iw_fatal_error(rdev))
44022 return -EIO;
44023@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44024 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44025 rdev->stats.stag.max = rdev->stats.stag.cur;
44026 mutex_unlock(&rdev->stats.lock);
44027- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44028+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44029 }
44030 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44031 __func__, stag_state, type, pdid, stag_idx);
44032diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44033index 79b3dbc..96e5fcc 100644
44034--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44035+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44036@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44037 struct ib_atomic_eth *ateth;
44038 struct ipath_ack_entry *e;
44039 u64 vaddr;
44040- atomic64_t *maddr;
44041+ atomic64_unchecked_t *maddr;
44042 u64 sdata;
44043 u32 rkey;
44044 u8 next;
44045@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44046 IB_ACCESS_REMOTE_ATOMIC)))
44047 goto nack_acc_unlck;
44048 /* Perform atomic OP and save result. */
44049- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44050+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44051 sdata = be64_to_cpu(ateth->swap_data);
44052 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44053 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44054- (u64) atomic64_add_return(sdata, maddr) - sdata :
44055+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44056 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44057 be64_to_cpu(ateth->compare_data),
44058 sdata);
44059diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44060index 1f95bba..9530f87 100644
44061--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44062+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44063@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44064 unsigned long flags;
44065 struct ib_wc wc;
44066 u64 sdata;
44067- atomic64_t *maddr;
44068+ atomic64_unchecked_t *maddr;
44069 enum ib_wc_status send_status;
44070
44071 /*
44072@@ -382,11 +382,11 @@ again:
44073 IB_ACCESS_REMOTE_ATOMIC)))
44074 goto acc_err;
44075 /* Perform atomic OP and save result. */
44076- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44077+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44078 sdata = wqe->wr.wr.atomic.compare_add;
44079 *(u64 *) sqp->s_sge.sge.vaddr =
44080 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44081- (u64) atomic64_add_return(sdata, maddr) - sdata :
44082+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44083 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44084 sdata, wqe->wr.wr.atomic.swap);
44085 goto send_comp;
44086diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44087index 82a7dd8..8fb6ba6 100644
44088--- a/drivers/infiniband/hw/mlx4/mad.c
44089+++ b/drivers/infiniband/hw/mlx4/mad.c
44090@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44091
44092 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44093 {
44094- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44095+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44096 cpu_to_be64(0xff00000000000000LL);
44097 }
44098
44099diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44100index ed327e6..ca1739e0 100644
44101--- a/drivers/infiniband/hw/mlx4/mcg.c
44102+++ b/drivers/infiniband/hw/mlx4/mcg.c
44103@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44104 {
44105 char name[20];
44106
44107- atomic_set(&ctx->tid, 0);
44108+ atomic_set_unchecked(&ctx->tid, 0);
44109 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44110 ctx->mcg_wq = create_singlethread_workqueue(name);
44111 if (!ctx->mcg_wq)
44112diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44113index 6eb743f..a7b0f6d 100644
44114--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44115+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44116@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44117 struct list_head mcg_mgid0_list;
44118 struct workqueue_struct *mcg_wq;
44119 struct mlx4_ib_demux_pv_ctx **tun;
44120- atomic_t tid;
44121+ atomic_unchecked_t tid;
44122 int flushing; /* flushing the work queue */
44123 };
44124
44125diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44126index 9d3e5c1..6f166df 100644
44127--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44128+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44129@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44130 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44131 }
44132
44133-int mthca_QUERY_FW(struct mthca_dev *dev)
44134+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44135 {
44136 struct mthca_mailbox *mailbox;
44137 u32 *outbox;
44138@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44139 CMD_TIME_CLASS_B);
44140 }
44141
44142-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44143+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44144 int num_mtt)
44145 {
44146 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44147@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44148 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44149 }
44150
44151-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44152+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44153 int eq_num)
44154 {
44155 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44156@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44157 CMD_TIME_CLASS_B);
44158 }
44159
44160-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44161+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44162 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44163 void *in_mad, void *response_mad)
44164 {
44165diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44166index ded76c1..0cf0a08 100644
44167--- a/drivers/infiniband/hw/mthca/mthca_main.c
44168+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44169@@ -692,7 +692,7 @@ err_close:
44170 return err;
44171 }
44172
44173-static int mthca_setup_hca(struct mthca_dev *dev)
44174+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44175 {
44176 int err;
44177
44178diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44179index ed9a989..6aa5dc2 100644
44180--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44181+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44182@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44183 * through the bitmaps)
44184 */
44185
44186-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44187+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44188 {
44189 int o;
44190 int m;
44191@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44192 return key;
44193 }
44194
44195-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44196+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44197 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44198 {
44199 struct mthca_mailbox *mailbox;
44200@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44201 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44202 }
44203
44204-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44205+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44206 u64 *buffer_list, int buffer_size_shift,
44207 int list_len, u64 iova, u64 total_size,
44208 u32 access, struct mthca_mr *mr)
44209diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44210index 415f8e1..e34214e 100644
44211--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44212+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44213@@ -764,7 +764,7 @@ unlock:
44214 return 0;
44215 }
44216
44217-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44218+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44219 {
44220 struct mthca_dev *dev = to_mdev(ibcq->device);
44221 struct mthca_cq *cq = to_mcq(ibcq);
44222diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44223index 3b2a6dc..bce26ff 100644
44224--- a/drivers/infiniband/hw/nes/nes.c
44225+++ b/drivers/infiniband/hw/nes/nes.c
44226@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44227 LIST_HEAD(nes_adapter_list);
44228 static LIST_HEAD(nes_dev_list);
44229
44230-atomic_t qps_destroyed;
44231+atomic_unchecked_t qps_destroyed;
44232
44233 static unsigned int ee_flsh_adapter;
44234 static unsigned int sysfs_nonidx_addr;
44235@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44236 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44237 struct nes_adapter *nesadapter = nesdev->nesadapter;
44238
44239- atomic_inc(&qps_destroyed);
44240+ atomic_inc_unchecked(&qps_destroyed);
44241
44242 /* Free the control structures */
44243
44244diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44245index bd9d132..70d84f4 100644
44246--- a/drivers/infiniband/hw/nes/nes.h
44247+++ b/drivers/infiniband/hw/nes/nes.h
44248@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44249 extern unsigned int wqm_quanta;
44250 extern struct list_head nes_adapter_list;
44251
44252-extern atomic_t cm_connects;
44253-extern atomic_t cm_accepts;
44254-extern atomic_t cm_disconnects;
44255-extern atomic_t cm_closes;
44256-extern atomic_t cm_connecteds;
44257-extern atomic_t cm_connect_reqs;
44258-extern atomic_t cm_rejects;
44259-extern atomic_t mod_qp_timouts;
44260-extern atomic_t qps_created;
44261-extern atomic_t qps_destroyed;
44262-extern atomic_t sw_qps_destroyed;
44263+extern atomic_unchecked_t cm_connects;
44264+extern atomic_unchecked_t cm_accepts;
44265+extern atomic_unchecked_t cm_disconnects;
44266+extern atomic_unchecked_t cm_closes;
44267+extern atomic_unchecked_t cm_connecteds;
44268+extern atomic_unchecked_t cm_connect_reqs;
44269+extern atomic_unchecked_t cm_rejects;
44270+extern atomic_unchecked_t mod_qp_timouts;
44271+extern atomic_unchecked_t qps_created;
44272+extern atomic_unchecked_t qps_destroyed;
44273+extern atomic_unchecked_t sw_qps_destroyed;
44274 extern u32 mh_detected;
44275 extern u32 mh_pauses_sent;
44276 extern u32 cm_packets_sent;
44277@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44278 extern u32 cm_packets_received;
44279 extern u32 cm_packets_dropped;
44280 extern u32 cm_packets_retrans;
44281-extern atomic_t cm_listens_created;
44282-extern atomic_t cm_listens_destroyed;
44283+extern atomic_unchecked_t cm_listens_created;
44284+extern atomic_unchecked_t cm_listens_destroyed;
44285 extern u32 cm_backlog_drops;
44286-extern atomic_t cm_loopbacks;
44287-extern atomic_t cm_nodes_created;
44288-extern atomic_t cm_nodes_destroyed;
44289-extern atomic_t cm_accel_dropped_pkts;
44290-extern atomic_t cm_resets_recvd;
44291-extern atomic_t pau_qps_created;
44292-extern atomic_t pau_qps_destroyed;
44293+extern atomic_unchecked_t cm_loopbacks;
44294+extern atomic_unchecked_t cm_nodes_created;
44295+extern atomic_unchecked_t cm_nodes_destroyed;
44296+extern atomic_unchecked_t cm_accel_dropped_pkts;
44297+extern atomic_unchecked_t cm_resets_recvd;
44298+extern atomic_unchecked_t pau_qps_created;
44299+extern atomic_unchecked_t pau_qps_destroyed;
44300
44301 extern u32 int_mod_timer_init;
44302 extern u32 int_mod_cq_depth_256;
44303diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44304index 6f09a72..cf4399d 100644
44305--- a/drivers/infiniband/hw/nes/nes_cm.c
44306+++ b/drivers/infiniband/hw/nes/nes_cm.c
44307@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44308 u32 cm_packets_retrans;
44309 u32 cm_packets_created;
44310 u32 cm_packets_received;
44311-atomic_t cm_listens_created;
44312-atomic_t cm_listens_destroyed;
44313+atomic_unchecked_t cm_listens_created;
44314+atomic_unchecked_t cm_listens_destroyed;
44315 u32 cm_backlog_drops;
44316-atomic_t cm_loopbacks;
44317-atomic_t cm_nodes_created;
44318-atomic_t cm_nodes_destroyed;
44319-atomic_t cm_accel_dropped_pkts;
44320-atomic_t cm_resets_recvd;
44321+atomic_unchecked_t cm_loopbacks;
44322+atomic_unchecked_t cm_nodes_created;
44323+atomic_unchecked_t cm_nodes_destroyed;
44324+atomic_unchecked_t cm_accel_dropped_pkts;
44325+atomic_unchecked_t cm_resets_recvd;
44326
44327 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44328 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44329@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44330 /* instance of function pointers for client API */
44331 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44332 static struct nes_cm_ops nes_cm_api = {
44333- mini_cm_accelerated,
44334- mini_cm_listen,
44335- mini_cm_del_listen,
44336- mini_cm_connect,
44337- mini_cm_close,
44338- mini_cm_accept,
44339- mini_cm_reject,
44340- mini_cm_recv_pkt,
44341- mini_cm_dealloc_core,
44342- mini_cm_get,
44343- mini_cm_set
44344+ .accelerated = mini_cm_accelerated,
44345+ .listen = mini_cm_listen,
44346+ .stop_listener = mini_cm_del_listen,
44347+ .connect = mini_cm_connect,
44348+ .close = mini_cm_close,
44349+ .accept = mini_cm_accept,
44350+ .reject = mini_cm_reject,
44351+ .recv_pkt = mini_cm_recv_pkt,
44352+ .destroy_cm_core = mini_cm_dealloc_core,
44353+ .get = mini_cm_get,
44354+ .set = mini_cm_set
44355 };
44356
44357 static struct nes_cm_core *g_cm_core;
44358
44359-atomic_t cm_connects;
44360-atomic_t cm_accepts;
44361-atomic_t cm_disconnects;
44362-atomic_t cm_closes;
44363-atomic_t cm_connecteds;
44364-atomic_t cm_connect_reqs;
44365-atomic_t cm_rejects;
44366+atomic_unchecked_t cm_connects;
44367+atomic_unchecked_t cm_accepts;
44368+atomic_unchecked_t cm_disconnects;
44369+atomic_unchecked_t cm_closes;
44370+atomic_unchecked_t cm_connecteds;
44371+atomic_unchecked_t cm_connect_reqs;
44372+atomic_unchecked_t cm_rejects;
44373
44374 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44375 {
44376@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44377 kfree(listener);
44378 listener = NULL;
44379 ret = 0;
44380- atomic_inc(&cm_listens_destroyed);
44381+ atomic_inc_unchecked(&cm_listens_destroyed);
44382 } else {
44383 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44384 }
44385@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44386 cm_node->rem_mac);
44387
44388 add_hte_node(cm_core, cm_node);
44389- atomic_inc(&cm_nodes_created);
44390+ atomic_inc_unchecked(&cm_nodes_created);
44391
44392 return cm_node;
44393 }
44394@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44395 }
44396
44397 atomic_dec(&cm_core->node_cnt);
44398- atomic_inc(&cm_nodes_destroyed);
44399+ atomic_inc_unchecked(&cm_nodes_destroyed);
44400 nesqp = cm_node->nesqp;
44401 if (nesqp) {
44402 nesqp->cm_node = NULL;
44403@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44404
44405 static void drop_packet(struct sk_buff *skb)
44406 {
44407- atomic_inc(&cm_accel_dropped_pkts);
44408+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44409 dev_kfree_skb_any(skb);
44410 }
44411
44412@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44413 {
44414
44415 int reset = 0; /* whether to send reset in case of err.. */
44416- atomic_inc(&cm_resets_recvd);
44417+ atomic_inc_unchecked(&cm_resets_recvd);
44418 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44419 " refcnt=%d\n", cm_node, cm_node->state,
44420 atomic_read(&cm_node->ref_count));
44421@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44422 rem_ref_cm_node(cm_node->cm_core, cm_node);
44423 return NULL;
44424 }
44425- atomic_inc(&cm_loopbacks);
44426+ atomic_inc_unchecked(&cm_loopbacks);
44427 loopbackremotenode->loopbackpartner = cm_node;
44428 loopbackremotenode->tcp_cntxt.rcv_wscale =
44429 NES_CM_DEFAULT_RCV_WND_SCALE;
44430@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44431 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44432 else {
44433 rem_ref_cm_node(cm_core, cm_node);
44434- atomic_inc(&cm_accel_dropped_pkts);
44435+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44436 dev_kfree_skb_any(skb);
44437 }
44438 break;
44439@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44440
44441 if ((cm_id) && (cm_id->event_handler)) {
44442 if (issue_disconn) {
44443- atomic_inc(&cm_disconnects);
44444+ atomic_inc_unchecked(&cm_disconnects);
44445 cm_event.event = IW_CM_EVENT_DISCONNECT;
44446 cm_event.status = disconn_status;
44447 cm_event.local_addr = cm_id->local_addr;
44448@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44449 }
44450
44451 if (issue_close) {
44452- atomic_inc(&cm_closes);
44453+ atomic_inc_unchecked(&cm_closes);
44454 nes_disconnect(nesqp, 1);
44455
44456 cm_id->provider_data = nesqp;
44457@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44458
44459 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44460 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44461- atomic_inc(&cm_accepts);
44462+ atomic_inc_unchecked(&cm_accepts);
44463
44464 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44465 netdev_refcnt_read(nesvnic->netdev));
44466@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44467 struct nes_cm_core *cm_core;
44468 u8 *start_buff;
44469
44470- atomic_inc(&cm_rejects);
44471+ atomic_inc_unchecked(&cm_rejects);
44472 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44473 loopback = cm_node->loopbackpartner;
44474 cm_core = cm_node->cm_core;
44475@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44476 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44477 ntohs(laddr->sin_port));
44478
44479- atomic_inc(&cm_connects);
44480+ atomic_inc_unchecked(&cm_connects);
44481 nesqp->active_conn = 1;
44482
44483 /* cache the cm_id in the qp */
44484@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44485 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44486 return err;
44487 }
44488- atomic_inc(&cm_listens_created);
44489+ atomic_inc_unchecked(&cm_listens_created);
44490 }
44491
44492 cm_id->add_ref(cm_id);
44493@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44494
44495 if (nesqp->destroyed)
44496 return;
44497- atomic_inc(&cm_connecteds);
44498+ atomic_inc_unchecked(&cm_connecteds);
44499 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44500 " local port 0x%04X. jiffies = %lu.\n",
44501 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44502@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44503
44504 cm_id->add_ref(cm_id);
44505 ret = cm_id->event_handler(cm_id, &cm_event);
44506- atomic_inc(&cm_closes);
44507+ atomic_inc_unchecked(&cm_closes);
44508 cm_event.event = IW_CM_EVENT_CLOSE;
44509 cm_event.status = 0;
44510 cm_event.provider_data = cm_id->provider_data;
44511@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44512 return;
44513 cm_id = cm_node->cm_id;
44514
44515- atomic_inc(&cm_connect_reqs);
44516+ atomic_inc_unchecked(&cm_connect_reqs);
44517 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44518 cm_node, cm_id, jiffies);
44519
44520@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44521 return;
44522 cm_id = cm_node->cm_id;
44523
44524- atomic_inc(&cm_connect_reqs);
44525+ atomic_inc_unchecked(&cm_connect_reqs);
44526 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44527 cm_node, cm_id, jiffies);
44528
44529diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44530index 4166452..fc952c3 100644
44531--- a/drivers/infiniband/hw/nes/nes_mgt.c
44532+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44533@@ -40,8 +40,8 @@
44534 #include "nes.h"
44535 #include "nes_mgt.h"
44536
44537-atomic_t pau_qps_created;
44538-atomic_t pau_qps_destroyed;
44539+atomic_unchecked_t pau_qps_created;
44540+atomic_unchecked_t pau_qps_destroyed;
44541
44542 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44543 {
44544@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44545 {
44546 struct sk_buff *skb;
44547 unsigned long flags;
44548- atomic_inc(&pau_qps_destroyed);
44549+ atomic_inc_unchecked(&pau_qps_destroyed);
44550
44551 /* Free packets that have not yet been forwarded */
44552 /* Lock is acquired by skb_dequeue when removing the skb */
44553@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44554 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44555 skb_queue_head_init(&nesqp->pau_list);
44556 spin_lock_init(&nesqp->pau_lock);
44557- atomic_inc(&pau_qps_created);
44558+ atomic_inc_unchecked(&pau_qps_created);
44559 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44560 }
44561
44562diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44563index 49eb511..a774366 100644
44564--- a/drivers/infiniband/hw/nes/nes_nic.c
44565+++ b/drivers/infiniband/hw/nes/nes_nic.c
44566@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44567 target_stat_values[++index] = mh_detected;
44568 target_stat_values[++index] = mh_pauses_sent;
44569 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44570- target_stat_values[++index] = atomic_read(&cm_connects);
44571- target_stat_values[++index] = atomic_read(&cm_accepts);
44572- target_stat_values[++index] = atomic_read(&cm_disconnects);
44573- target_stat_values[++index] = atomic_read(&cm_connecteds);
44574- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44575- target_stat_values[++index] = atomic_read(&cm_rejects);
44576- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44577- target_stat_values[++index] = atomic_read(&qps_created);
44578- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44579- target_stat_values[++index] = atomic_read(&qps_destroyed);
44580- target_stat_values[++index] = atomic_read(&cm_closes);
44581+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44582+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44583+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44584+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44585+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44586+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44587+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44588+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44589+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44590+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44591+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44592 target_stat_values[++index] = cm_packets_sent;
44593 target_stat_values[++index] = cm_packets_bounced;
44594 target_stat_values[++index] = cm_packets_created;
44595 target_stat_values[++index] = cm_packets_received;
44596 target_stat_values[++index] = cm_packets_dropped;
44597 target_stat_values[++index] = cm_packets_retrans;
44598- target_stat_values[++index] = atomic_read(&cm_listens_created);
44599- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44600+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44601+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44602 target_stat_values[++index] = cm_backlog_drops;
44603- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44604- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44605- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44606- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44607- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44608+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44609+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44610+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44611+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44612+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44613 target_stat_values[++index] = nesadapter->free_4kpbl;
44614 target_stat_values[++index] = nesadapter->free_256pbl;
44615 target_stat_values[++index] = int_mod_timer_init;
44616 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44617 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44618 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44619- target_stat_values[++index] = atomic_read(&pau_qps_created);
44620- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44621+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44622+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44623 }
44624
44625 /**
44626diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44627index fef067c..6a25ccd 100644
44628--- a/drivers/infiniband/hw/nes/nes_verbs.c
44629+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44630@@ -46,9 +46,9 @@
44631
44632 #include <rdma/ib_umem.h>
44633
44634-atomic_t mod_qp_timouts;
44635-atomic_t qps_created;
44636-atomic_t sw_qps_destroyed;
44637+atomic_unchecked_t mod_qp_timouts;
44638+atomic_unchecked_t qps_created;
44639+atomic_unchecked_t sw_qps_destroyed;
44640
44641 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44642
44643@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44644 if (init_attr->create_flags)
44645 return ERR_PTR(-EINVAL);
44646
44647- atomic_inc(&qps_created);
44648+ atomic_inc_unchecked(&qps_created);
44649 switch (init_attr->qp_type) {
44650 case IB_QPT_RC:
44651 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44652@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44653 struct iw_cm_event cm_event;
44654 int ret = 0;
44655
44656- atomic_inc(&sw_qps_destroyed);
44657+ atomic_inc_unchecked(&sw_qps_destroyed);
44658 nesqp->destroyed = 1;
44659
44660 /* Blow away the connection if it exists. */
44661diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44662index c00ae09..04e91be 100644
44663--- a/drivers/infiniband/hw/qib/qib.h
44664+++ b/drivers/infiniband/hw/qib/qib.h
44665@@ -52,6 +52,7 @@
44666 #include <linux/kref.h>
44667 #include <linux/sched.h>
44668 #include <linux/kthread.h>
44669+#include <linux/slab.h>
44670
44671 #include "qib_common.h"
44672 #include "qib_verbs.h"
44673diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44674index 24c41ba..102d71f 100644
44675--- a/drivers/input/gameport/gameport.c
44676+++ b/drivers/input/gameport/gameport.c
44677@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44678 */
44679 static void gameport_init_port(struct gameport *gameport)
44680 {
44681- static atomic_t gameport_no = ATOMIC_INIT(0);
44682+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44683
44684 __module_get(THIS_MODULE);
44685
44686 mutex_init(&gameport->drv_mutex);
44687 device_initialize(&gameport->dev);
44688 dev_set_name(&gameport->dev, "gameport%lu",
44689- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44690+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44691 gameport->dev.bus = &gameport_bus;
44692 gameport->dev.release = gameport_release_port;
44693 if (gameport->parent)
44694diff --git a/drivers/input/input.c b/drivers/input/input.c
44695index 29ca0bb..f4bc2e3 100644
44696--- a/drivers/input/input.c
44697+++ b/drivers/input/input.c
44698@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44699 */
44700 struct input_dev *input_allocate_device(void)
44701 {
44702- static atomic_t input_no = ATOMIC_INIT(0);
44703+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44704 struct input_dev *dev;
44705
44706 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44707@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44708 INIT_LIST_HEAD(&dev->node);
44709
44710 dev_set_name(&dev->dev, "input%ld",
44711- (unsigned long) atomic_inc_return(&input_no) - 1);
44712+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44713
44714 __module_get(THIS_MODULE);
44715 }
44716diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44717index 4a95b22..874c182 100644
44718--- a/drivers/input/joystick/sidewinder.c
44719+++ b/drivers/input/joystick/sidewinder.c
44720@@ -30,6 +30,7 @@
44721 #include <linux/kernel.h>
44722 #include <linux/module.h>
44723 #include <linux/slab.h>
44724+#include <linux/sched.h>
44725 #include <linux/input.h>
44726 #include <linux/gameport.h>
44727 #include <linux/jiffies.h>
44728diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44729index 177602c..ec78499 100644
44730--- a/drivers/input/joystick/xpad.c
44731+++ b/drivers/input/joystick/xpad.c
44732@@ -850,7 +850,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44733
44734 static int xpad_led_probe(struct usb_xpad *xpad)
44735 {
44736- static atomic_t led_seq = ATOMIC_INIT(0);
44737+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44738 long led_no;
44739 struct xpad_led *led;
44740 struct led_classdev *led_cdev;
44741@@ -863,7 +863,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44742 if (!led)
44743 return -ENOMEM;
44744
44745- led_no = (long)atomic_inc_return(&led_seq) - 1;
44746+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44747
44748 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44749 led->xpad = xpad;
44750diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44751index 719410f..1896169 100644
44752--- a/drivers/input/misc/ims-pcu.c
44753+++ b/drivers/input/misc/ims-pcu.c
44754@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44755
44756 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44757 {
44758- static atomic_t device_no = ATOMIC_INIT(0);
44759+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44760
44761 const struct ims_pcu_device_info *info;
44762 int error;
44763@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44764 }
44765
44766 /* Device appears to be operable, complete initialization */
44767- pcu->device_no = atomic_inc_return(&device_no) - 1;
44768+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44769
44770 /*
44771 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44772diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44773index 2f0b39d..7370f13 100644
44774--- a/drivers/input/mouse/psmouse.h
44775+++ b/drivers/input/mouse/psmouse.h
44776@@ -116,7 +116,7 @@ struct psmouse_attribute {
44777 ssize_t (*set)(struct psmouse *psmouse, void *data,
44778 const char *buf, size_t count);
44779 bool protect;
44780-};
44781+} __do_const;
44782 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44783
44784 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44785diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44786index b604564..3f14ae4 100644
44787--- a/drivers/input/mousedev.c
44788+++ b/drivers/input/mousedev.c
44789@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44790
44791 spin_unlock_irq(&client->packet_lock);
44792
44793- if (copy_to_user(buffer, data, count))
44794+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44795 return -EFAULT;
44796
44797 return count;
44798diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44799index b29134d..394deb0 100644
44800--- a/drivers/input/serio/serio.c
44801+++ b/drivers/input/serio/serio.c
44802@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44803 */
44804 static void serio_init_port(struct serio *serio)
44805 {
44806- static atomic_t serio_no = ATOMIC_INIT(0);
44807+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44808
44809 __module_get(THIS_MODULE);
44810
44811@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44812 mutex_init(&serio->drv_mutex);
44813 device_initialize(&serio->dev);
44814 dev_set_name(&serio->dev, "serio%ld",
44815- (long)atomic_inc_return(&serio_no) - 1);
44816+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44817 serio->dev.bus = &serio_bus;
44818 serio->dev.release = serio_release_port;
44819 serio->dev.groups = serio_device_attr_groups;
44820diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44821index c9a02fe..0debc75 100644
44822--- a/drivers/input/serio/serio_raw.c
44823+++ b/drivers/input/serio/serio_raw.c
44824@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44825
44826 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44827 {
44828- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44829+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44830 struct serio_raw *serio_raw;
44831 int err;
44832
44833@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44834 }
44835
44836 snprintf(serio_raw->name, sizeof(serio_raw->name),
44837- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44838+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44839 kref_init(&serio_raw->kref);
44840 INIT_LIST_HEAD(&serio_raw->client_list);
44841 init_waitqueue_head(&serio_raw->wait);
44842diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44843index a83cc2a..64462e6 100644
44844--- a/drivers/iommu/arm-smmu.c
44845+++ b/drivers/iommu/arm-smmu.c
44846@@ -921,7 +921,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44847 cfg->irptndx = cfg->cbndx;
44848 }
44849
44850- ACCESS_ONCE(smmu_domain->smmu) = smmu;
44851+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
44852 arm_smmu_init_context_bank(smmu_domain);
44853 spin_unlock_irqrestore(&smmu_domain->lock, flags);
44854
44855diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44856index 33c4395..e06447e 100644
44857--- a/drivers/iommu/irq_remapping.c
44858+++ b/drivers/iommu/irq_remapping.c
44859@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44860 void panic_if_irq_remap(const char *msg)
44861 {
44862 if (irq_remapping_enabled)
44863- panic(msg);
44864+ panic("%s", msg);
44865 }
44866
44867 static void ir_ack_apic_edge(struct irq_data *data)
44868@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44869
44870 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44871 {
44872- chip->irq_print_chip = ir_print_prefix;
44873- chip->irq_ack = ir_ack_apic_edge;
44874- chip->irq_eoi = ir_ack_apic_level;
44875- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44876+ pax_open_kernel();
44877+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44878+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44879+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44880+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44881+ pax_close_kernel();
44882 }
44883
44884 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44885diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44886index dda6dbc..f9adebb 100644
44887--- a/drivers/irqchip/irq-gic.c
44888+++ b/drivers/irqchip/irq-gic.c
44889@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44890 * Supported arch specific GIC irq extension.
44891 * Default make them NULL.
44892 */
44893-struct irq_chip gic_arch_extn = {
44894+irq_chip_no_const gic_arch_extn = {
44895 .irq_eoi = NULL,
44896 .irq_mask = NULL,
44897 .irq_unmask = NULL,
44898@@ -312,7 +312,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44899 chained_irq_exit(chip, desc);
44900 }
44901
44902-static struct irq_chip gic_chip = {
44903+static irq_chip_no_const gic_chip __read_only = {
44904 .name = "GIC",
44905 .irq_mask = gic_mask_irq,
44906 .irq_unmask = gic_unmask_irq,
44907diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44908index 8777065..a4a9967 100644
44909--- a/drivers/irqchip/irq-renesas-irqc.c
44910+++ b/drivers/irqchip/irq-renesas-irqc.c
44911@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44912 struct irqc_priv *p;
44913 struct resource *io;
44914 struct resource *irq;
44915- struct irq_chip *irq_chip;
44916+ irq_chip_no_const *irq_chip;
44917 const char *name = dev_name(&pdev->dev);
44918 int ret;
44919 int k;
44920diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44921index 6a2df32..dc962f1 100644
44922--- a/drivers/isdn/capi/capi.c
44923+++ b/drivers/isdn/capi/capi.c
44924@@ -81,8 +81,8 @@ struct capiminor {
44925
44926 struct capi20_appl *ap;
44927 u32 ncci;
44928- atomic_t datahandle;
44929- atomic_t msgid;
44930+ atomic_unchecked_t datahandle;
44931+ atomic_unchecked_t msgid;
44932
44933 struct tty_port port;
44934 int ttyinstop;
44935@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44936 capimsg_setu16(s, 2, mp->ap->applid);
44937 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44938 capimsg_setu8 (s, 5, CAPI_RESP);
44939- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44940+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44941 capimsg_setu32(s, 8, mp->ncci);
44942 capimsg_setu16(s, 12, datahandle);
44943 }
44944@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44945 mp->outbytes -= len;
44946 spin_unlock_bh(&mp->outlock);
44947
44948- datahandle = atomic_inc_return(&mp->datahandle);
44949+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44950 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44951 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44952 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44953 capimsg_setu16(skb->data, 2, mp->ap->applid);
44954 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44955 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44956- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44957+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44958 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44959 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44960 capimsg_setu16(skb->data, 16, len); /* Data length */
44961diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44962index b7ae0a0..04590fa 100644
44963--- a/drivers/isdn/gigaset/bas-gigaset.c
44964+++ b/drivers/isdn/gigaset/bas-gigaset.c
44965@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44966
44967
44968 static const struct gigaset_ops gigops = {
44969- gigaset_write_cmd,
44970- gigaset_write_room,
44971- gigaset_chars_in_buffer,
44972- gigaset_brkchars,
44973- gigaset_init_bchannel,
44974- gigaset_close_bchannel,
44975- gigaset_initbcshw,
44976- gigaset_freebcshw,
44977- gigaset_reinitbcshw,
44978- gigaset_initcshw,
44979- gigaset_freecshw,
44980- gigaset_set_modem_ctrl,
44981- gigaset_baud_rate,
44982- gigaset_set_line_ctrl,
44983- gigaset_isoc_send_skb,
44984- gigaset_isoc_input,
44985+ .write_cmd = gigaset_write_cmd,
44986+ .write_room = gigaset_write_room,
44987+ .chars_in_buffer = gigaset_chars_in_buffer,
44988+ .brkchars = gigaset_brkchars,
44989+ .init_bchannel = gigaset_init_bchannel,
44990+ .close_bchannel = gigaset_close_bchannel,
44991+ .initbcshw = gigaset_initbcshw,
44992+ .freebcshw = gigaset_freebcshw,
44993+ .reinitbcshw = gigaset_reinitbcshw,
44994+ .initcshw = gigaset_initcshw,
44995+ .freecshw = gigaset_freecshw,
44996+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44997+ .baud_rate = gigaset_baud_rate,
44998+ .set_line_ctrl = gigaset_set_line_ctrl,
44999+ .send_skb = gigaset_isoc_send_skb,
45000+ .handle_input = gigaset_isoc_input,
45001 };
45002
45003 /* bas_gigaset_init
45004diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45005index 600c79b..3752bab 100644
45006--- a/drivers/isdn/gigaset/interface.c
45007+++ b/drivers/isdn/gigaset/interface.c
45008@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45009 }
45010 tty->driver_data = cs;
45011
45012- ++cs->port.count;
45013+ atomic_inc(&cs->port.count);
45014
45015- if (cs->port.count == 1) {
45016+ if (atomic_read(&cs->port.count) == 1) {
45017 tty_port_tty_set(&cs->port, tty);
45018 cs->port.low_latency = 1;
45019 }
45020@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45021
45022 if (!cs->connected)
45023 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45024- else if (!cs->port.count)
45025+ else if (!atomic_read(&cs->port.count))
45026 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45027- else if (!--cs->port.count)
45028+ else if (!atomic_dec_return(&cs->port.count))
45029 tty_port_tty_set(&cs->port, NULL);
45030
45031 mutex_unlock(&cs->mutex);
45032diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45033index 8c91fd5..14f13ce 100644
45034--- a/drivers/isdn/gigaset/ser-gigaset.c
45035+++ b/drivers/isdn/gigaset/ser-gigaset.c
45036@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45037 }
45038
45039 static const struct gigaset_ops ops = {
45040- gigaset_write_cmd,
45041- gigaset_write_room,
45042- gigaset_chars_in_buffer,
45043- gigaset_brkchars,
45044- gigaset_init_bchannel,
45045- gigaset_close_bchannel,
45046- gigaset_initbcshw,
45047- gigaset_freebcshw,
45048- gigaset_reinitbcshw,
45049- gigaset_initcshw,
45050- gigaset_freecshw,
45051- gigaset_set_modem_ctrl,
45052- gigaset_baud_rate,
45053- gigaset_set_line_ctrl,
45054- gigaset_m10x_send_skb, /* asyncdata.c */
45055- gigaset_m10x_input, /* asyncdata.c */
45056+ .write_cmd = gigaset_write_cmd,
45057+ .write_room = gigaset_write_room,
45058+ .chars_in_buffer = gigaset_chars_in_buffer,
45059+ .brkchars = gigaset_brkchars,
45060+ .init_bchannel = gigaset_init_bchannel,
45061+ .close_bchannel = gigaset_close_bchannel,
45062+ .initbcshw = gigaset_initbcshw,
45063+ .freebcshw = gigaset_freebcshw,
45064+ .reinitbcshw = gigaset_reinitbcshw,
45065+ .initcshw = gigaset_initcshw,
45066+ .freecshw = gigaset_freecshw,
45067+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45068+ .baud_rate = gigaset_baud_rate,
45069+ .set_line_ctrl = gigaset_set_line_ctrl,
45070+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45071+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45072 };
45073
45074
45075diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45076index d0a41cb..b953e50 100644
45077--- a/drivers/isdn/gigaset/usb-gigaset.c
45078+++ b/drivers/isdn/gigaset/usb-gigaset.c
45079@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45080 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45081 memcpy(cs->hw.usb->bchars, buf, 6);
45082 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45083- 0, 0, &buf, 6, 2000);
45084+ 0, 0, buf, 6, 2000);
45085 }
45086
45087 static void gigaset_freebcshw(struct bc_state *bcs)
45088@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45089 }
45090
45091 static const struct gigaset_ops ops = {
45092- gigaset_write_cmd,
45093- gigaset_write_room,
45094- gigaset_chars_in_buffer,
45095- gigaset_brkchars,
45096- gigaset_init_bchannel,
45097- gigaset_close_bchannel,
45098- gigaset_initbcshw,
45099- gigaset_freebcshw,
45100- gigaset_reinitbcshw,
45101- gigaset_initcshw,
45102- gigaset_freecshw,
45103- gigaset_set_modem_ctrl,
45104- gigaset_baud_rate,
45105- gigaset_set_line_ctrl,
45106- gigaset_m10x_send_skb,
45107- gigaset_m10x_input,
45108+ .write_cmd = gigaset_write_cmd,
45109+ .write_room = gigaset_write_room,
45110+ .chars_in_buffer = gigaset_chars_in_buffer,
45111+ .brkchars = gigaset_brkchars,
45112+ .init_bchannel = gigaset_init_bchannel,
45113+ .close_bchannel = gigaset_close_bchannel,
45114+ .initbcshw = gigaset_initbcshw,
45115+ .freebcshw = gigaset_freebcshw,
45116+ .reinitbcshw = gigaset_reinitbcshw,
45117+ .initcshw = gigaset_initcshw,
45118+ .freecshw = gigaset_freecshw,
45119+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45120+ .baud_rate = gigaset_baud_rate,
45121+ .set_line_ctrl = gigaset_set_line_ctrl,
45122+ .send_skb = gigaset_m10x_send_skb,
45123+ .handle_input = gigaset_m10x_input,
45124 };
45125
45126 /*
45127diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45128index 4d9b195..455075c 100644
45129--- a/drivers/isdn/hardware/avm/b1.c
45130+++ b/drivers/isdn/hardware/avm/b1.c
45131@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45132 }
45133 if (left) {
45134 if (t4file->user) {
45135- if (copy_from_user(buf, dp, left))
45136+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45137 return -EFAULT;
45138 } else {
45139 memcpy(buf, dp, left);
45140@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45141 }
45142 if (left) {
45143 if (config->user) {
45144- if (copy_from_user(buf, dp, left))
45145+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45146 return -EFAULT;
45147 } else {
45148 memcpy(buf, dp, left);
45149diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45150index 9b856e1..fa03c92 100644
45151--- a/drivers/isdn/i4l/isdn_common.c
45152+++ b/drivers/isdn/i4l/isdn_common.c
45153@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45154 } else
45155 return -EINVAL;
45156 case IIOCDBGVAR:
45157+ if (!capable(CAP_SYS_RAWIO))
45158+ return -EPERM;
45159 if (arg) {
45160 if (copy_to_user(argp, &dev, sizeof(ulong)))
45161 return -EFAULT;
45162diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45163index 91d5730..336523e 100644
45164--- a/drivers/isdn/i4l/isdn_concap.c
45165+++ b/drivers/isdn/i4l/isdn_concap.c
45166@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45167 }
45168
45169 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45170- &isdn_concap_dl_data_req,
45171- &isdn_concap_dl_connect_req,
45172- &isdn_concap_dl_disconn_req
45173+ .data_req = &isdn_concap_dl_data_req,
45174+ .connect_req = &isdn_concap_dl_connect_req,
45175+ .disconn_req = &isdn_concap_dl_disconn_req
45176 };
45177
45178 /* The following should better go into a dedicated source file such that
45179diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45180index 3c5f249..5fac4d0 100644
45181--- a/drivers/isdn/i4l/isdn_tty.c
45182+++ b/drivers/isdn/i4l/isdn_tty.c
45183@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45184
45185 #ifdef ISDN_DEBUG_MODEM_OPEN
45186 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45187- port->count);
45188+ atomic_read(&port->count));
45189 #endif
45190- port->count++;
45191+ atomic_inc(&port->count);
45192 port->tty = tty;
45193 /*
45194 * Start up serial port
45195@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45196 #endif
45197 return;
45198 }
45199- if ((tty->count == 1) && (port->count != 1)) {
45200+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45201 /*
45202 * Uh, oh. tty->count is 1, which means that the tty
45203 * structure will be freed. Info->count should always
45204@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45205 * serial port won't be shutdown.
45206 */
45207 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45208- "info->count is %d\n", port->count);
45209- port->count = 1;
45210+ "info->count is %d\n", atomic_read(&port->count));
45211+ atomic_set(&port->count, 1);
45212 }
45213- if (--port->count < 0) {
45214+ if (atomic_dec_return(&port->count) < 0) {
45215 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45216- info->line, port->count);
45217- port->count = 0;
45218+ info->line, atomic_read(&port->count));
45219+ atomic_set(&port->count, 0);
45220 }
45221- if (port->count) {
45222+ if (atomic_read(&port->count)) {
45223 #ifdef ISDN_DEBUG_MODEM_OPEN
45224 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45225 #endif
45226@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45227 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45228 return;
45229 isdn_tty_shutdown(info);
45230- port->count = 0;
45231+ atomic_set(&port->count, 0);
45232 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45233 port->tty = NULL;
45234 wake_up_interruptible(&port->open_wait);
45235@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45236 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45237 modem_info *info = &dev->mdm.info[i];
45238
45239- if (info->port.count == 0)
45240+ if (atomic_read(&info->port.count) == 0)
45241 continue;
45242 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45243 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45244diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45245index e2d4e58..40cd045 100644
45246--- a/drivers/isdn/i4l/isdn_x25iface.c
45247+++ b/drivers/isdn/i4l/isdn_x25iface.c
45248@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45249
45250
45251 static struct concap_proto_ops ix25_pops = {
45252- &isdn_x25iface_proto_new,
45253- &isdn_x25iface_proto_del,
45254- &isdn_x25iface_proto_restart,
45255- &isdn_x25iface_proto_close,
45256- &isdn_x25iface_xmit,
45257- &isdn_x25iface_receive,
45258- &isdn_x25iface_connect_ind,
45259- &isdn_x25iface_disconn_ind
45260+ .proto_new = &isdn_x25iface_proto_new,
45261+ .proto_del = &isdn_x25iface_proto_del,
45262+ .restart = &isdn_x25iface_proto_restart,
45263+ .close = &isdn_x25iface_proto_close,
45264+ .encap_and_xmit = &isdn_x25iface_xmit,
45265+ .data_ind = &isdn_x25iface_receive,
45266+ .connect_ind = &isdn_x25iface_connect_ind,
45267+ .disconn_ind = &isdn_x25iface_disconn_ind
45268 };
45269
45270 /* error message helper function */
45271diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45272index 6a7447c..cae33fe 100644
45273--- a/drivers/isdn/icn/icn.c
45274+++ b/drivers/isdn/icn/icn.c
45275@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45276 if (count > len)
45277 count = len;
45278 if (user) {
45279- if (copy_from_user(msg, buf, count))
45280+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45281 return -EFAULT;
45282 } else
45283 memcpy(msg, buf, count);
45284diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45285index a4f05c5..1433bc5 100644
45286--- a/drivers/isdn/mISDN/dsp_cmx.c
45287+++ b/drivers/isdn/mISDN/dsp_cmx.c
45288@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45289 static u16 dsp_count; /* last sample count */
45290 static int dsp_count_valid; /* if we have last sample count */
45291
45292-void
45293+void __intentional_overflow(-1)
45294 dsp_cmx_send(void *arg)
45295 {
45296 struct dsp_conf *conf;
45297diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45298index f58a354..fbae176 100644
45299--- a/drivers/leds/leds-clevo-mail.c
45300+++ b/drivers/leds/leds-clevo-mail.c
45301@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45302 * detected as working, but in reality it is not) as low as
45303 * possible.
45304 */
45305-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45306+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45307 {
45308 .callback = clevo_mail_led_dmi_callback,
45309 .ident = "Clevo D410J",
45310diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45311index 046cb70..6b20d39 100644
45312--- a/drivers/leds/leds-ss4200.c
45313+++ b/drivers/leds/leds-ss4200.c
45314@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45315 * detected as working, but in reality it is not) as low as
45316 * possible.
45317 */
45318-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45319+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45320 {
45321 .callback = ss4200_led_dmi_callback,
45322 .ident = "Intel SS4200-E",
45323diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45324index 6590558..a74c5dd 100644
45325--- a/drivers/lguest/core.c
45326+++ b/drivers/lguest/core.c
45327@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45328 * The end address needs +1 because __get_vm_area allocates an
45329 * extra guard page, so we need space for that.
45330 */
45331+
45332+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45333+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45334+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45335+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45336+#else
45337 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45338 VM_ALLOC, switcher_addr, switcher_addr
45339 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45340+#endif
45341+
45342 if (!switcher_vma) {
45343 err = -ENOMEM;
45344 printk("lguest: could not map switcher pages high\n");
45345@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45346 * Now the Switcher is mapped at the right address, we can't fail!
45347 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45348 */
45349- memcpy(switcher_vma->addr, start_switcher_text,
45350+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45351 end_switcher_text - start_switcher_text);
45352
45353 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45354diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45355index e8b55c3..3514c37 100644
45356--- a/drivers/lguest/page_tables.c
45357+++ b/drivers/lguest/page_tables.c
45358@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45359 /*:*/
45360
45361 #ifdef CONFIG_X86_PAE
45362-static void release_pmd(pmd_t *spmd)
45363+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45364 {
45365 /* If the entry's not present, there's nothing to release. */
45366 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45367diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45368index 922a1ac..9dd0c2a 100644
45369--- a/drivers/lguest/x86/core.c
45370+++ b/drivers/lguest/x86/core.c
45371@@ -59,7 +59,7 @@ static struct {
45372 /* Offset from where switcher.S was compiled to where we've copied it */
45373 static unsigned long switcher_offset(void)
45374 {
45375- return switcher_addr - (unsigned long)start_switcher_text;
45376+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45377 }
45378
45379 /* This cpu's struct lguest_pages (after the Switcher text page) */
45380@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45381 * These copies are pretty cheap, so we do them unconditionally: */
45382 /* Save the current Host top-level page directory.
45383 */
45384+
45385+#ifdef CONFIG_PAX_PER_CPU_PGD
45386+ pages->state.host_cr3 = read_cr3();
45387+#else
45388 pages->state.host_cr3 = __pa(current->mm->pgd);
45389+#endif
45390+
45391 /*
45392 * Set up the Guest's page tables to see this CPU's pages (and no
45393 * other CPU's pages).
45394@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45395 * compiled-in switcher code and the high-mapped copy we just made.
45396 */
45397 for (i = 0; i < IDT_ENTRIES; i++)
45398- default_idt_entries[i] += switcher_offset();
45399+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45400
45401 /*
45402 * Set up the Switcher's per-cpu areas.
45403@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45404 * it will be undisturbed when we switch. To change %cs and jump we
45405 * need this structure to feed to Intel's "lcall" instruction.
45406 */
45407- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45408+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45409 lguest_entry.segment = LGUEST_CS;
45410
45411 /*
45412diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45413index 40634b0..4f5855e 100644
45414--- a/drivers/lguest/x86/switcher_32.S
45415+++ b/drivers/lguest/x86/switcher_32.S
45416@@ -87,6 +87,7 @@
45417 #include <asm/page.h>
45418 #include <asm/segment.h>
45419 #include <asm/lguest.h>
45420+#include <asm/processor-flags.h>
45421
45422 // We mark the start of the code to copy
45423 // It's placed in .text tho it's never run here
45424@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45425 // Changes type when we load it: damn Intel!
45426 // For after we switch over our page tables
45427 // That entry will be read-only: we'd crash.
45428+
45429+#ifdef CONFIG_PAX_KERNEXEC
45430+ mov %cr0, %edx
45431+ xor $X86_CR0_WP, %edx
45432+ mov %edx, %cr0
45433+#endif
45434+
45435 movl $(GDT_ENTRY_TSS*8), %edx
45436 ltr %dx
45437
45438@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45439 // Let's clear it again for our return.
45440 // The GDT descriptor of the Host
45441 // Points to the table after two "size" bytes
45442- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45443+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45444 // Clear "used" from type field (byte 5, bit 2)
45445- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45446+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45447+
45448+#ifdef CONFIG_PAX_KERNEXEC
45449+ mov %cr0, %eax
45450+ xor $X86_CR0_WP, %eax
45451+ mov %eax, %cr0
45452+#endif
45453
45454 // Once our page table's switched, the Guest is live!
45455 // The Host fades as we run this final step.
45456@@ -295,13 +309,12 @@ deliver_to_host:
45457 // I consulted gcc, and it gave
45458 // These instructions, which I gladly credit:
45459 leal (%edx,%ebx,8), %eax
45460- movzwl (%eax),%edx
45461- movl 4(%eax), %eax
45462- xorw %ax, %ax
45463- orl %eax, %edx
45464+ movl 4(%eax), %edx
45465+ movw (%eax), %dx
45466 // Now the address of the handler's in %edx
45467 // We call it now: its "iret" drops us home.
45468- jmp *%edx
45469+ ljmp $__KERNEL_CS, $1f
45470+1: jmp *%edx
45471
45472 // Every interrupt can come to us here
45473 // But we must truly tell each apart.
45474diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45475index a08e3ee..df8ade2 100644
45476--- a/drivers/md/bcache/closure.h
45477+++ b/drivers/md/bcache/closure.h
45478@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45479 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45480 struct workqueue_struct *wq)
45481 {
45482- BUG_ON(object_is_on_stack(cl));
45483+ BUG_ON(object_starts_on_stack(cl));
45484 closure_set_ip(cl);
45485 cl->fn = fn;
45486 cl->wq = wq;
45487diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45488index 67f8b31..9418f2b 100644
45489--- a/drivers/md/bitmap.c
45490+++ b/drivers/md/bitmap.c
45491@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45492 chunk_kb ? "KB" : "B");
45493 if (bitmap->storage.file) {
45494 seq_printf(seq, ", file: ");
45495- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45496+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45497 }
45498
45499 seq_printf(seq, "\n");
45500diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45501index 5152142..623d141 100644
45502--- a/drivers/md/dm-ioctl.c
45503+++ b/drivers/md/dm-ioctl.c
45504@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45505 cmd == DM_LIST_VERSIONS_CMD)
45506 return 0;
45507
45508- if ((cmd == DM_DEV_CREATE_CMD)) {
45509+ if (cmd == DM_DEV_CREATE_CMD) {
45510 if (!*param->name) {
45511 DMWARN("name not supplied when creating device");
45512 return -EINVAL;
45513diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45514index 7dfdb5c..4caada6 100644
45515--- a/drivers/md/dm-raid1.c
45516+++ b/drivers/md/dm-raid1.c
45517@@ -40,7 +40,7 @@ enum dm_raid1_error {
45518
45519 struct mirror {
45520 struct mirror_set *ms;
45521- atomic_t error_count;
45522+ atomic_unchecked_t error_count;
45523 unsigned long error_type;
45524 struct dm_dev *dev;
45525 sector_t offset;
45526@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45527 struct mirror *m;
45528
45529 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45530- if (!atomic_read(&m->error_count))
45531+ if (!atomic_read_unchecked(&m->error_count))
45532 return m;
45533
45534 return NULL;
45535@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45536 * simple way to tell if a device has encountered
45537 * errors.
45538 */
45539- atomic_inc(&m->error_count);
45540+ atomic_inc_unchecked(&m->error_count);
45541
45542 if (test_and_set_bit(error_type, &m->error_type))
45543 return;
45544@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45545 struct mirror *m = get_default_mirror(ms);
45546
45547 do {
45548- if (likely(!atomic_read(&m->error_count)))
45549+ if (likely(!atomic_read_unchecked(&m->error_count)))
45550 return m;
45551
45552 if (m-- == ms->mirror)
45553@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45554 {
45555 struct mirror *default_mirror = get_default_mirror(m->ms);
45556
45557- return !atomic_read(&default_mirror->error_count);
45558+ return !atomic_read_unchecked(&default_mirror->error_count);
45559 }
45560
45561 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45562@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45563 */
45564 if (likely(region_in_sync(ms, region, 1)))
45565 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45566- else if (m && atomic_read(&m->error_count))
45567+ else if (m && atomic_read_unchecked(&m->error_count))
45568 m = NULL;
45569
45570 if (likely(m))
45571@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45572 }
45573
45574 ms->mirror[mirror].ms = ms;
45575- atomic_set(&(ms->mirror[mirror].error_count), 0);
45576+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45577 ms->mirror[mirror].error_type = 0;
45578 ms->mirror[mirror].offset = offset;
45579
45580@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45581 */
45582 static char device_status_char(struct mirror *m)
45583 {
45584- if (!atomic_read(&(m->error_count)))
45585+ if (!atomic_read_unchecked(&(m->error_count)))
45586 return 'A';
45587
45588 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45589diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45590index 28a9012..9c0f6a5 100644
45591--- a/drivers/md/dm-stats.c
45592+++ b/drivers/md/dm-stats.c
45593@@ -382,7 +382,7 @@ do_sync_free:
45594 synchronize_rcu_expedited();
45595 dm_stat_free(&s->rcu_head);
45596 } else {
45597- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45598+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45599 call_rcu(&s->rcu_head, dm_stat_free);
45600 }
45601 return 0;
45602@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45603 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45604 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45605 ));
45606- ACCESS_ONCE(last->last_sector) = end_sector;
45607- ACCESS_ONCE(last->last_rw) = bi_rw;
45608+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45609+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45610 }
45611
45612 rcu_read_lock();
45613diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45614index d1600d2..4c3af3a 100644
45615--- a/drivers/md/dm-stripe.c
45616+++ b/drivers/md/dm-stripe.c
45617@@ -21,7 +21,7 @@ struct stripe {
45618 struct dm_dev *dev;
45619 sector_t physical_start;
45620
45621- atomic_t error_count;
45622+ atomic_unchecked_t error_count;
45623 };
45624
45625 struct stripe_c {
45626@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45627 kfree(sc);
45628 return r;
45629 }
45630- atomic_set(&(sc->stripe[i].error_count), 0);
45631+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45632 }
45633
45634 ti->private = sc;
45635@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45636 DMEMIT("%d ", sc->stripes);
45637 for (i = 0; i < sc->stripes; i++) {
45638 DMEMIT("%s ", sc->stripe[i].dev->name);
45639- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45640+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45641 'D' : 'A';
45642 }
45643 buffer[i] = '\0';
45644@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45645 */
45646 for (i = 0; i < sc->stripes; i++)
45647 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45648- atomic_inc(&(sc->stripe[i].error_count));
45649- if (atomic_read(&(sc->stripe[i].error_count)) <
45650+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45651+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45652 DM_IO_ERROR_THRESHOLD)
45653 schedule_work(&sc->trigger_event);
45654 }
45655diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45656index f9c6cb8..e272df6 100644
45657--- a/drivers/md/dm-table.c
45658+++ b/drivers/md/dm-table.c
45659@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
45660 static int open_dev(struct dm_dev_internal *d, dev_t dev,
45661 struct mapped_device *md)
45662 {
45663- static char *_claim_ptr = "I belong to device-mapper";
45664+ static char _claim_ptr[] = "I belong to device-mapper";
45665 struct block_device *bdev;
45666
45667 int r;
45668@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45669 if (!dev_size)
45670 return 0;
45671
45672- if ((start >= dev_size) || (start + len > dev_size)) {
45673+ if ((start >= dev_size) || (len > dev_size - start)) {
45674 DMWARN("%s: %s too small for target: "
45675 "start=%llu, len=%llu, dev_size=%llu",
45676 dm_device_name(ti->table->md), bdevname(bdev, b),
45677diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45678index e9d33ad..dae9880d 100644
45679--- a/drivers/md/dm-thin-metadata.c
45680+++ b/drivers/md/dm-thin-metadata.c
45681@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45682 {
45683 pmd->info.tm = pmd->tm;
45684 pmd->info.levels = 2;
45685- pmd->info.value_type.context = pmd->data_sm;
45686+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45687 pmd->info.value_type.size = sizeof(__le64);
45688 pmd->info.value_type.inc = data_block_inc;
45689 pmd->info.value_type.dec = data_block_dec;
45690@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45691
45692 pmd->bl_info.tm = pmd->tm;
45693 pmd->bl_info.levels = 1;
45694- pmd->bl_info.value_type.context = pmd->data_sm;
45695+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45696 pmd->bl_info.value_type.size = sizeof(__le64);
45697 pmd->bl_info.value_type.inc = data_block_inc;
45698 pmd->bl_info.value_type.dec = data_block_dec;
45699diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45700index 32b958d..34011e8 100644
45701--- a/drivers/md/dm.c
45702+++ b/drivers/md/dm.c
45703@@ -180,9 +180,9 @@ struct mapped_device {
45704 /*
45705 * Event handling.
45706 */
45707- atomic_t event_nr;
45708+ atomic_unchecked_t event_nr;
45709 wait_queue_head_t eventq;
45710- atomic_t uevent_seq;
45711+ atomic_unchecked_t uevent_seq;
45712 struct list_head uevent_list;
45713 spinlock_t uevent_lock; /* Protect access to uevent_list */
45714
45715@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
45716 spin_lock_init(&md->deferred_lock);
45717 atomic_set(&md->holders, 1);
45718 atomic_set(&md->open_count, 0);
45719- atomic_set(&md->event_nr, 0);
45720- atomic_set(&md->uevent_seq, 0);
45721+ atomic_set_unchecked(&md->event_nr, 0);
45722+ atomic_set_unchecked(&md->uevent_seq, 0);
45723 INIT_LIST_HEAD(&md->uevent_list);
45724 spin_lock_init(&md->uevent_lock);
45725
45726@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
45727
45728 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45729
45730- atomic_inc(&md->event_nr);
45731+ atomic_inc_unchecked(&md->event_nr);
45732 wake_up(&md->eventq);
45733 }
45734
45735@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45736
45737 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45738 {
45739- return atomic_add_return(1, &md->uevent_seq);
45740+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45741 }
45742
45743 uint32_t dm_get_event_nr(struct mapped_device *md)
45744 {
45745- return atomic_read(&md->event_nr);
45746+ return atomic_read_unchecked(&md->event_nr);
45747 }
45748
45749 int dm_wait_event(struct mapped_device *md, int event_nr)
45750 {
45751 return wait_event_interruptible(md->eventq,
45752- (event_nr != atomic_read(&md->event_nr)));
45753+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45754 }
45755
45756 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45757diff --git a/drivers/md/md.c b/drivers/md/md.c
45758index 1294238..a442227 100644
45759--- a/drivers/md/md.c
45760+++ b/drivers/md/md.c
45761@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45762 * start build, activate spare
45763 */
45764 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45765-static atomic_t md_event_count;
45766+static atomic_unchecked_t md_event_count;
45767 void md_new_event(struct mddev *mddev)
45768 {
45769- atomic_inc(&md_event_count);
45770+ atomic_inc_unchecked(&md_event_count);
45771 wake_up(&md_event_waiters);
45772 }
45773 EXPORT_SYMBOL_GPL(md_new_event);
45774@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45775 */
45776 static void md_new_event_inintr(struct mddev *mddev)
45777 {
45778- atomic_inc(&md_event_count);
45779+ atomic_inc_unchecked(&md_event_count);
45780 wake_up(&md_event_waiters);
45781 }
45782
45783@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45784 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45785 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45786 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45787- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45788+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45789
45790 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45791 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45792@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45793 else
45794 sb->resync_offset = cpu_to_le64(0);
45795
45796- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45797+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45798
45799 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45800 sb->size = cpu_to_le64(mddev->dev_sectors);
45801@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
45802 static ssize_t
45803 errors_show(struct md_rdev *rdev, char *page)
45804 {
45805- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45806+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45807 }
45808
45809 static ssize_t
45810@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45811 char *e;
45812 unsigned long n = simple_strtoul(buf, &e, 10);
45813 if (*buf && (*e == 0 || *e == '\n')) {
45814- atomic_set(&rdev->corrected_errors, n);
45815+ atomic_set_unchecked(&rdev->corrected_errors, n);
45816 return len;
45817 }
45818 return -EINVAL;
45819@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
45820 rdev->sb_loaded = 0;
45821 rdev->bb_page = NULL;
45822 atomic_set(&rdev->nr_pending, 0);
45823- atomic_set(&rdev->read_errors, 0);
45824- atomic_set(&rdev->corrected_errors, 0);
45825+ atomic_set_unchecked(&rdev->read_errors, 0);
45826+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45827
45828 INIT_LIST_HEAD(&rdev->same_set);
45829 init_waitqueue_head(&rdev->blocked_wait);
45830@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45831
45832 spin_unlock(&pers_lock);
45833 seq_printf(seq, "\n");
45834- seq->poll_event = atomic_read(&md_event_count);
45835+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45836 return 0;
45837 }
45838 if (v == (void*)2) {
45839@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45840 return error;
45841
45842 seq = file->private_data;
45843- seq->poll_event = atomic_read(&md_event_count);
45844+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45845 return error;
45846 }
45847
45848@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45849 /* always allow read */
45850 mask = POLLIN | POLLRDNORM;
45851
45852- if (seq->poll_event != atomic_read(&md_event_count))
45853+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45854 mask |= POLLERR | POLLPRI;
45855 return mask;
45856 }
45857@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45858 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45859 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45860 (int)part_stat_read(&disk->part0, sectors[1]) -
45861- atomic_read(&disk->sync_io);
45862+ atomic_read_unchecked(&disk->sync_io);
45863 /* sync IO will cause sync_io to increase before the disk_stats
45864 * as sync_io is counted when a request starts, and
45865 * disk_stats is counted when it completes.
45866diff --git a/drivers/md/md.h b/drivers/md/md.h
45867index a49d991..3582bb7 100644
45868--- a/drivers/md/md.h
45869+++ b/drivers/md/md.h
45870@@ -94,13 +94,13 @@ struct md_rdev {
45871 * only maintained for arrays that
45872 * support hot removal
45873 */
45874- atomic_t read_errors; /* number of consecutive read errors that
45875+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45876 * we have tried to ignore.
45877 */
45878 struct timespec last_read_error; /* monotonic time since our
45879 * last read error
45880 */
45881- atomic_t corrected_errors; /* number of corrected read errors,
45882+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45883 * for reporting to userspace and storing
45884 * in superblock.
45885 */
45886@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
45887
45888 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45889 {
45890- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45891+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45892 }
45893
45894 struct md_personality
45895diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45896index 786b689..ea8c956 100644
45897--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45898+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45899@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45900 * Flick into a mode where all blocks get allocated in the new area.
45901 */
45902 smm->begin = old_len;
45903- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45904+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45905
45906 /*
45907 * Extend.
45908@@ -710,7 +710,7 @@ out:
45909 /*
45910 * Switch back to normal behaviour.
45911 */
45912- memcpy(sm, &ops, sizeof(*sm));
45913+ memcpy((void *)sm, &ops, sizeof(*sm));
45914 return r;
45915 }
45916
45917diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45918index 3e6d115..ffecdeb 100644
45919--- a/drivers/md/persistent-data/dm-space-map.h
45920+++ b/drivers/md/persistent-data/dm-space-map.h
45921@@ -71,6 +71,7 @@ struct dm_space_map {
45922 dm_sm_threshold_fn fn,
45923 void *context);
45924 };
45925+typedef struct dm_space_map __no_const dm_space_map_no_const;
45926
45927 /*----------------------------------------------------------------*/
45928
45929diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45930index 55de4f6..b1c57fe 100644
45931--- a/drivers/md/raid1.c
45932+++ b/drivers/md/raid1.c
45933@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45934 if (r1_sync_page_io(rdev, sect, s,
45935 bio->bi_io_vec[idx].bv_page,
45936 READ) != 0)
45937- atomic_add(s, &rdev->corrected_errors);
45938+ atomic_add_unchecked(s, &rdev->corrected_errors);
45939 }
45940 sectors -= s;
45941 sect += s;
45942@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45943 !test_bit(Faulty, &rdev->flags)) {
45944 if (r1_sync_page_io(rdev, sect, s,
45945 conf->tmppage, READ)) {
45946- atomic_add(s, &rdev->corrected_errors);
45947+ atomic_add_unchecked(s, &rdev->corrected_errors);
45948 printk(KERN_INFO
45949 "md/raid1:%s: read error corrected "
45950 "(%d sectors at %llu on %s)\n",
45951diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45952index 6703751..187af1e 100644
45953--- a/drivers/md/raid10.c
45954+++ b/drivers/md/raid10.c
45955@@ -1948,7 +1948,7 @@ static void end_sync_read(struct bio *bio, int error)
45956 /* The write handler will notice the lack of
45957 * R10BIO_Uptodate and record any errors etc
45958 */
45959- atomic_add(r10_bio->sectors,
45960+ atomic_add_unchecked(r10_bio->sectors,
45961 &conf->mirrors[d].rdev->corrected_errors);
45962
45963 /* for reconstruct, we always reschedule after a read.
45964@@ -2306,7 +2306,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45965 {
45966 struct timespec cur_time_mon;
45967 unsigned long hours_since_last;
45968- unsigned int read_errors = atomic_read(&rdev->read_errors);
45969+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45970
45971 ktime_get_ts(&cur_time_mon);
45972
45973@@ -2328,9 +2328,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45974 * overflowing the shift of read_errors by hours_since_last.
45975 */
45976 if (hours_since_last >= 8 * sizeof(read_errors))
45977- atomic_set(&rdev->read_errors, 0);
45978+ atomic_set_unchecked(&rdev->read_errors, 0);
45979 else
45980- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45981+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45982 }
45983
45984 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45985@@ -2384,8 +2384,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45986 return;
45987
45988 check_decay_read_errors(mddev, rdev);
45989- atomic_inc(&rdev->read_errors);
45990- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45991+ atomic_inc_unchecked(&rdev->read_errors);
45992+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45993 char b[BDEVNAME_SIZE];
45994 bdevname(rdev->bdev, b);
45995
45996@@ -2393,7 +2393,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45997 "md/raid10:%s: %s: Raid device exceeded "
45998 "read_error threshold [cur %d:max %d]\n",
45999 mdname(mddev), b,
46000- atomic_read(&rdev->read_errors), max_read_errors);
46001+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46002 printk(KERN_NOTICE
46003 "md/raid10:%s: %s: Failing raid device\n",
46004 mdname(mddev), b);
46005@@ -2548,7 +2548,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46006 sect +
46007 choose_data_offset(r10_bio, rdev)),
46008 bdevname(rdev->bdev, b));
46009- atomic_add(s, &rdev->corrected_errors);
46010+ atomic_add_unchecked(s, &rdev->corrected_errors);
46011 }
46012
46013 rdev_dec_pending(rdev, mddev);
46014diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46015index 9f0fbec..991e7a1 100644
46016--- a/drivers/md/raid5.c
46017+++ b/drivers/md/raid5.c
46018@@ -1735,6 +1735,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46019 return 1;
46020 }
46021
46022+#ifdef CONFIG_GRKERNSEC_HIDESYM
46023+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46024+#endif
46025+
46026 static int grow_stripes(struct r5conf *conf, int num)
46027 {
46028 struct kmem_cache *sc;
46029@@ -1746,7 +1750,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46030 "raid%d-%s", conf->level, mdname(conf->mddev));
46031 else
46032 sprintf(conf->cache_name[0],
46033+#ifdef CONFIG_GRKERNSEC_HIDESYM
46034+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46035+#else
46036 "raid%d-%p", conf->level, conf->mddev);
46037+#endif
46038 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46039
46040 conf->active_name = 0;
46041@@ -2022,21 +2030,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46042 mdname(conf->mddev), STRIPE_SECTORS,
46043 (unsigned long long)s,
46044 bdevname(rdev->bdev, b));
46045- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46046+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46047 clear_bit(R5_ReadError, &sh->dev[i].flags);
46048 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46049 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46050 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46051
46052- if (atomic_read(&rdev->read_errors))
46053- atomic_set(&rdev->read_errors, 0);
46054+ if (atomic_read_unchecked(&rdev->read_errors))
46055+ atomic_set_unchecked(&rdev->read_errors, 0);
46056 } else {
46057 const char *bdn = bdevname(rdev->bdev, b);
46058 int retry = 0;
46059 int set_bad = 0;
46060
46061 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46062- atomic_inc(&rdev->read_errors);
46063+ atomic_inc_unchecked(&rdev->read_errors);
46064 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46065 printk_ratelimited(
46066 KERN_WARNING
46067@@ -2064,7 +2072,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46068 mdname(conf->mddev),
46069 (unsigned long long)s,
46070 bdn);
46071- } else if (atomic_read(&rdev->read_errors)
46072+ } else if (atomic_read_unchecked(&rdev->read_errors)
46073 > conf->max_nr_stripes)
46074 printk(KERN_WARNING
46075 "md/raid:%s: Too many read errors, failing device %s.\n",
46076diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46077index 983db75..ef9248c 100644
46078--- a/drivers/media/dvb-core/dvbdev.c
46079+++ b/drivers/media/dvb-core/dvbdev.c
46080@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46081 const struct dvb_device *template, void *priv, int type)
46082 {
46083 struct dvb_device *dvbdev;
46084- struct file_operations *dvbdevfops;
46085+ file_operations_no_const *dvbdevfops;
46086 struct device *clsdev;
46087 int minor;
46088 int id;
46089diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46090index 539f4db..cdd403b 100644
46091--- a/drivers/media/dvb-frontends/af9033.h
46092+++ b/drivers/media/dvb-frontends/af9033.h
46093@@ -82,7 +82,7 @@ struct af9033_ops {
46094 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46095 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46096 int onoff);
46097-};
46098+} __no_const;
46099
46100
46101 #if IS_ENABLED(CONFIG_DVB_AF9033)
46102diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46103index 9b6c3bb..baeb5c7 100644
46104--- a/drivers/media/dvb-frontends/dib3000.h
46105+++ b/drivers/media/dvb-frontends/dib3000.h
46106@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46107 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46108 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46109 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46110-};
46111+} __no_const;
46112
46113 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46114 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46115diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
46116index 1fea0e9..321ce8f 100644
46117--- a/drivers/media/dvb-frontends/dib7000p.h
46118+++ b/drivers/media/dvb-frontends/dib7000p.h
46119@@ -64,7 +64,7 @@ struct dib7000p_ops {
46120 int (*get_adc_power)(struct dvb_frontend *fe);
46121 int (*slave_reset)(struct dvb_frontend *fe);
46122 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
46123-};
46124+} __no_const;
46125
46126 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
46127 void *dib7000p_attach(struct dib7000p_ops *ops);
46128diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
46129index 84cc103..5780c54 100644
46130--- a/drivers/media/dvb-frontends/dib8000.h
46131+++ b/drivers/media/dvb-frontends/dib8000.h
46132@@ -61,7 +61,7 @@ struct dib8000_ops {
46133 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
46134 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
46135 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
46136-};
46137+} __no_const;
46138
46139 #if IS_ENABLED(CONFIG_DVB_DIB8000)
46140 void *dib8000_attach(struct dib8000_ops *ops);
46141diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46142index ed8cb90..5ef7f79 100644
46143--- a/drivers/media/pci/cx88/cx88-video.c
46144+++ b/drivers/media/pci/cx88/cx88-video.c
46145@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46146
46147 /* ------------------------------------------------------------------ */
46148
46149-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46150-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46151-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46152+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46153+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46154+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46155
46156 module_param_array(video_nr, int, NULL, 0444);
46157 module_param_array(vbi_nr, int, NULL, 0444);
46158diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46159index 802642d..5534900 100644
46160--- a/drivers/media/pci/ivtv/ivtv-driver.c
46161+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46162@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46163 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46164
46165 /* ivtv instance counter */
46166-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46167+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46168
46169 /* Parameter declarations */
46170 static int cardtype[IVTV_MAX_CARDS];
46171diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
46172index 172583d..0f806f4 100644
46173--- a/drivers/media/pci/solo6x10/solo6x10-core.c
46174+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
46175@@ -430,7 +430,7 @@ static void solo_device_release(struct device *dev)
46176
46177 static int solo_sysfs_init(struct solo_dev *solo_dev)
46178 {
46179- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46180+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46181 struct device *dev = &solo_dev->dev;
46182 const char *driver;
46183 int i;
46184diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
46185index c7141f2..5301fec 100644
46186--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
46187+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
46188@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
46189
46190 int solo_g723_init(struct solo_dev *solo_dev)
46191 {
46192- static struct snd_device_ops ops = { NULL };
46193+ static struct snd_device_ops ops = { };
46194 struct snd_card *card;
46195 struct snd_kcontrol_new kctl;
46196 char name[32];
46197diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46198index 8c84846..27b4f83 100644
46199--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
46200+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46201@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
46202
46203 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
46204 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
46205- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
46206+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
46207 if (p2m_id < 0)
46208 p2m_id = -p2m_id;
46209 }
46210diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
46211index c6154b0..73e4ae9 100644
46212--- a/drivers/media/pci/solo6x10/solo6x10.h
46213+++ b/drivers/media/pci/solo6x10/solo6x10.h
46214@@ -219,7 +219,7 @@ struct solo_dev {
46215
46216 /* P2M DMA Engine */
46217 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
46218- atomic_t p2m_count;
46219+ atomic_unchecked_t p2m_count;
46220 int p2m_jiffies;
46221 unsigned int p2m_timeouts;
46222
46223diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46224index 2d177fa..5b925a1 100644
46225--- a/drivers/media/platform/omap/omap_vout.c
46226+++ b/drivers/media/platform/omap/omap_vout.c
46227@@ -63,7 +63,6 @@ enum omap_vout_channels {
46228 OMAP_VIDEO2,
46229 };
46230
46231-static struct videobuf_queue_ops video_vbq_ops;
46232 /* Variables configurable through module params*/
46233 static u32 video1_numbuffers = 3;
46234 static u32 video2_numbuffers = 3;
46235@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
46236 {
46237 struct videobuf_queue *q;
46238 struct omap_vout_device *vout = NULL;
46239+ static struct videobuf_queue_ops video_vbq_ops = {
46240+ .buf_setup = omap_vout_buffer_setup,
46241+ .buf_prepare = omap_vout_buffer_prepare,
46242+ .buf_release = omap_vout_buffer_release,
46243+ .buf_queue = omap_vout_buffer_queue,
46244+ };
46245
46246 vout = video_drvdata(file);
46247 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46248@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
46249 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46250
46251 q = &vout->vbq;
46252- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46253- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46254- video_vbq_ops.buf_release = omap_vout_buffer_release;
46255- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46256 spin_lock_init(&vout->vbq_lock);
46257
46258 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46259diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46260index fb2acc5..a2fcbdc4 100644
46261--- a/drivers/media/platform/s5p-tv/mixer.h
46262+++ b/drivers/media/platform/s5p-tv/mixer.h
46263@@ -156,7 +156,7 @@ struct mxr_layer {
46264 /** layer index (unique identifier) */
46265 int idx;
46266 /** callbacks for layer methods */
46267- struct mxr_layer_ops ops;
46268+ struct mxr_layer_ops *ops;
46269 /** format array */
46270 const struct mxr_format **fmt_array;
46271 /** size of format array */
46272diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46273index 74344c7..a39e70e 100644
46274--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46275+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46276@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46277 {
46278 struct mxr_layer *layer;
46279 int ret;
46280- struct mxr_layer_ops ops = {
46281+ static struct mxr_layer_ops ops = {
46282 .release = mxr_graph_layer_release,
46283 .buffer_set = mxr_graph_buffer_set,
46284 .stream_set = mxr_graph_stream_set,
46285diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46286index b713403..53cb5ad 100644
46287--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46288+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46289@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46290 layer->update_buf = next;
46291 }
46292
46293- layer->ops.buffer_set(layer, layer->update_buf);
46294+ layer->ops->buffer_set(layer, layer->update_buf);
46295
46296 if (done && done != layer->shadow_buf)
46297 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46298diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46299index b4d2696..91df48e 100644
46300--- a/drivers/media/platform/s5p-tv/mixer_video.c
46301+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46302@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46303 layer->geo.src.height = layer->geo.src.full_height;
46304
46305 mxr_geometry_dump(mdev, &layer->geo);
46306- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46307+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46308 mxr_geometry_dump(mdev, &layer->geo);
46309 }
46310
46311@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46312 layer->geo.dst.full_width = mbus_fmt.width;
46313 layer->geo.dst.full_height = mbus_fmt.height;
46314 layer->geo.dst.field = mbus_fmt.field;
46315- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46316+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46317
46318 mxr_geometry_dump(mdev, &layer->geo);
46319 }
46320@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46321 /* set source size to highest accepted value */
46322 geo->src.full_width = max(geo->dst.full_width, pix->width);
46323 geo->src.full_height = max(geo->dst.full_height, pix->height);
46324- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46325+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46326 mxr_geometry_dump(mdev, &layer->geo);
46327 /* set cropping to total visible screen */
46328 geo->src.width = pix->width;
46329@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46330 geo->src.x_offset = 0;
46331 geo->src.y_offset = 0;
46332 /* assure consistency of geometry */
46333- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46334+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46335 mxr_geometry_dump(mdev, &layer->geo);
46336 /* set full size to lowest possible value */
46337 geo->src.full_width = 0;
46338 geo->src.full_height = 0;
46339- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46340+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46341 mxr_geometry_dump(mdev, &layer->geo);
46342
46343 /* returning results */
46344@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46345 target->width = s->r.width;
46346 target->height = s->r.height;
46347
46348- layer->ops.fix_geometry(layer, stage, s->flags);
46349+ layer->ops->fix_geometry(layer, stage, s->flags);
46350
46351 /* retrieve update selection rectangle */
46352 res.left = target->x_offset;
46353@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46354 mxr_output_get(mdev);
46355
46356 mxr_layer_update_output(layer);
46357- layer->ops.format_set(layer);
46358+ layer->ops->format_set(layer);
46359 /* enabling layer in hardware */
46360 spin_lock_irqsave(&layer->enq_slock, flags);
46361 layer->state = MXR_LAYER_STREAMING;
46362 spin_unlock_irqrestore(&layer->enq_slock, flags);
46363
46364- layer->ops.stream_set(layer, MXR_ENABLE);
46365+ layer->ops->stream_set(layer, MXR_ENABLE);
46366 mxr_streamer_get(mdev);
46367
46368 return 0;
46369@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46370 spin_unlock_irqrestore(&layer->enq_slock, flags);
46371
46372 /* disabling layer in hardware */
46373- layer->ops.stream_set(layer, MXR_DISABLE);
46374+ layer->ops->stream_set(layer, MXR_DISABLE);
46375 /* remove one streamer */
46376 mxr_streamer_put(mdev);
46377 /* allow changes in output configuration */
46378@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46379
46380 void mxr_layer_release(struct mxr_layer *layer)
46381 {
46382- if (layer->ops.release)
46383- layer->ops.release(layer);
46384+ if (layer->ops->release)
46385+ layer->ops->release(layer);
46386 }
46387
46388 void mxr_base_layer_release(struct mxr_layer *layer)
46389@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46390
46391 layer->mdev = mdev;
46392 layer->idx = idx;
46393- layer->ops = *ops;
46394+ layer->ops = ops;
46395
46396 spin_lock_init(&layer->enq_slock);
46397 INIT_LIST_HEAD(&layer->enq_list);
46398diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46399index c9388c4..ce71ece 100644
46400--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46401+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46402@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46403 {
46404 struct mxr_layer *layer;
46405 int ret;
46406- struct mxr_layer_ops ops = {
46407+ static struct mxr_layer_ops ops = {
46408 .release = mxr_vp_layer_release,
46409 .buffer_set = mxr_vp_buffer_set,
46410 .stream_set = mxr_vp_stream_set,
46411diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46412index 8033371..de5bca0 100644
46413--- a/drivers/media/platform/vivi.c
46414+++ b/drivers/media/platform/vivi.c
46415@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46416 MODULE_LICENSE("Dual BSD/GPL");
46417 MODULE_VERSION(VIVI_VERSION);
46418
46419-static unsigned video_nr = -1;
46420-module_param(video_nr, uint, 0644);
46421+static int video_nr = -1;
46422+module_param(video_nr, int, 0644);
46423 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46424
46425 static unsigned n_devs = 1;
46426diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46427index 82affae..42833ec 100644
46428--- a/drivers/media/radio/radio-cadet.c
46429+++ b/drivers/media/radio/radio-cadet.c
46430@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46431 unsigned char readbuf[RDS_BUFFER];
46432 int i = 0;
46433
46434+ if (count > RDS_BUFFER)
46435+ return -EFAULT;
46436 mutex_lock(&dev->lock);
46437 if (dev->rdsstat == 0)
46438 cadet_start_rds(dev);
46439@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46440 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46441 mutex_unlock(&dev->lock);
46442
46443- if (i && copy_to_user(data, readbuf, i))
46444- return -EFAULT;
46445+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46446+ i = -EFAULT;
46447+
46448 return i;
46449 }
46450
46451diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46452index 5236035..c622c74 100644
46453--- a/drivers/media/radio/radio-maxiradio.c
46454+++ b/drivers/media/radio/radio-maxiradio.c
46455@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46456 /* TEA5757 pin mappings */
46457 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46458
46459-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46460+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46461
46462 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46463 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46464diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46465index 050b3bb..79f62b9 100644
46466--- a/drivers/media/radio/radio-shark.c
46467+++ b/drivers/media/radio/radio-shark.c
46468@@ -79,7 +79,7 @@ struct shark_device {
46469 u32 last_val;
46470 };
46471
46472-static atomic_t shark_instance = ATOMIC_INIT(0);
46473+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46474
46475 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46476 {
46477diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46478index 8654e0d..0608a64 100644
46479--- a/drivers/media/radio/radio-shark2.c
46480+++ b/drivers/media/radio/radio-shark2.c
46481@@ -74,7 +74,7 @@ struct shark_device {
46482 u8 *transfer_buffer;
46483 };
46484
46485-static atomic_t shark_instance = ATOMIC_INIT(0);
46486+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46487
46488 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46489 {
46490diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46491index 633022b..7f10754 100644
46492--- a/drivers/media/radio/radio-si476x.c
46493+++ b/drivers/media/radio/radio-si476x.c
46494@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46495 struct si476x_radio *radio;
46496 struct v4l2_ctrl *ctrl;
46497
46498- static atomic_t instance = ATOMIC_INIT(0);
46499+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46500
46501 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46502 if (!radio)
46503diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46504index 9fd1527..8927230 100644
46505--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46506+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46507@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46508
46509 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46510 {
46511- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46512- char result[64];
46513- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46514- sizeof(result), 0);
46515+ char *buf;
46516+ char *result;
46517+ int retval;
46518+
46519+ buf = kmalloc(2, GFP_KERNEL);
46520+ if (buf == NULL)
46521+ return -ENOMEM;
46522+ result = kmalloc(64, GFP_KERNEL);
46523+ if (result == NULL) {
46524+ kfree(buf);
46525+ return -ENOMEM;
46526+ }
46527+
46528+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46529+ buf[1] = enable ? 1 : 0;
46530+
46531+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46532+
46533+ kfree(buf);
46534+ kfree(result);
46535+ return retval;
46536 }
46537
46538 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46539 {
46540- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46541- char state[3];
46542- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46543+ char *buf;
46544+ char *state;
46545+ int retval;
46546+
46547+ buf = kmalloc(2, GFP_KERNEL);
46548+ if (buf == NULL)
46549+ return -ENOMEM;
46550+ state = kmalloc(3, GFP_KERNEL);
46551+ if (state == NULL) {
46552+ kfree(buf);
46553+ return -ENOMEM;
46554+ }
46555+
46556+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46557+ buf[1] = enable ? 1 : 0;
46558+
46559+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46560+
46561+ kfree(buf);
46562+ kfree(state);
46563+ return retval;
46564 }
46565
46566 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46567 {
46568- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46569- char state[3];
46570+ char *query;
46571+ char *state;
46572 int ret;
46573+ query = kmalloc(1, GFP_KERNEL);
46574+ if (query == NULL)
46575+ return -ENOMEM;
46576+ state = kmalloc(3, GFP_KERNEL);
46577+ if (state == NULL) {
46578+ kfree(query);
46579+ return -ENOMEM;
46580+ }
46581+
46582+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46583
46584 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46585
46586- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46587- sizeof(state), 0);
46588+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46589 if (ret < 0) {
46590 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46591 "state info\n");
46592@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46593
46594 /* Copy this pointer as we are gonna need it in the release phase */
46595 cinergyt2_usb_device = adap->dev;
46596-
46597+ kfree(query);
46598+ kfree(state);
46599 return 0;
46600 }
46601
46602@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46603 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46604 {
46605 struct cinergyt2_state *st = d->priv;
46606- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46607+ u8 *key, *cmd;
46608 int i;
46609
46610+ cmd = kmalloc(1, GFP_KERNEL);
46611+ if (cmd == NULL)
46612+ return -EINVAL;
46613+ key = kzalloc(5, GFP_KERNEL);
46614+ if (key == NULL) {
46615+ kfree(cmd);
46616+ return -EINVAL;
46617+ }
46618+
46619+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46620+
46621 *state = REMOTE_NO_KEY_PRESSED;
46622
46623- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46624+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46625 if (key[4] == 0xff) {
46626 /* key repeat */
46627 st->rc_counter++;
46628@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46629 *event = d->last_event;
46630 deb_rc("repeat key, event %x\n",
46631 *event);
46632- return 0;
46633+ goto out;
46634 }
46635 }
46636 deb_rc("repeated key (non repeatable)\n");
46637 }
46638- return 0;
46639+ goto out;
46640 }
46641
46642 /* hack to pass checksum on the custom field */
46643@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46644
46645 deb_rc("key: %*ph\n", 5, key);
46646 }
46647+out:
46648+ kfree(cmd);
46649+ kfree(key);
46650 return 0;
46651 }
46652
46653diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46654index c890fe4..f9b2ae6 100644
46655--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46656+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46657@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46658 fe_status_t *status)
46659 {
46660 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46661- struct dvbt_get_status_msg result;
46662- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46663+ struct dvbt_get_status_msg *result;
46664+ u8 *cmd;
46665 int ret;
46666
46667- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46668- sizeof(result), 0);
46669+ cmd = kmalloc(1, GFP_KERNEL);
46670+ if (cmd == NULL)
46671+ return -ENOMEM;
46672+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46673+ if (result == NULL) {
46674+ kfree(cmd);
46675+ return -ENOMEM;
46676+ }
46677+
46678+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46679+
46680+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46681+ sizeof(*result), 0);
46682 if (ret < 0)
46683- return ret;
46684+ goto out;
46685
46686 *status = 0;
46687
46688- if (0xffff - le16_to_cpu(result.gain) > 30)
46689+ if (0xffff - le16_to_cpu(result->gain) > 30)
46690 *status |= FE_HAS_SIGNAL;
46691- if (result.lock_bits & (1 << 6))
46692+ if (result->lock_bits & (1 << 6))
46693 *status |= FE_HAS_LOCK;
46694- if (result.lock_bits & (1 << 5))
46695+ if (result->lock_bits & (1 << 5))
46696 *status |= FE_HAS_SYNC;
46697- if (result.lock_bits & (1 << 4))
46698+ if (result->lock_bits & (1 << 4))
46699 *status |= FE_HAS_CARRIER;
46700- if (result.lock_bits & (1 << 1))
46701+ if (result->lock_bits & (1 << 1))
46702 *status |= FE_HAS_VITERBI;
46703
46704 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46705 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46706 *status &= ~FE_HAS_LOCK;
46707
46708- return 0;
46709+out:
46710+ kfree(cmd);
46711+ kfree(result);
46712+ return ret;
46713 }
46714
46715 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46716 {
46717 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46718- struct dvbt_get_status_msg status;
46719- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46720+ struct dvbt_get_status_msg *status;
46721+ char *cmd;
46722 int ret;
46723
46724- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46725- sizeof(status), 0);
46726+ cmd = kmalloc(1, GFP_KERNEL);
46727+ if (cmd == NULL)
46728+ return -ENOMEM;
46729+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46730+ if (status == NULL) {
46731+ kfree(cmd);
46732+ return -ENOMEM;
46733+ }
46734+
46735+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46736+
46737+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46738+ sizeof(*status), 0);
46739 if (ret < 0)
46740- return ret;
46741+ goto out;
46742
46743- *ber = le32_to_cpu(status.viterbi_error_rate);
46744+ *ber = le32_to_cpu(status->viterbi_error_rate);
46745+out:
46746+ kfree(cmd);
46747+ kfree(status);
46748 return 0;
46749 }
46750
46751 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46752 {
46753 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46754- struct dvbt_get_status_msg status;
46755- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46756+ struct dvbt_get_status_msg *status;
46757+ u8 *cmd;
46758 int ret;
46759
46760- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46761- sizeof(status), 0);
46762+ cmd = kmalloc(1, GFP_KERNEL);
46763+ if (cmd == NULL)
46764+ return -ENOMEM;
46765+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46766+ if (status == NULL) {
46767+ kfree(cmd);
46768+ return -ENOMEM;
46769+ }
46770+
46771+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46772+
46773+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46774+ sizeof(*status), 0);
46775 if (ret < 0) {
46776 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46777 ret);
46778- return ret;
46779+ goto out;
46780 }
46781- *unc = le32_to_cpu(status.uncorrected_block_count);
46782- return 0;
46783+ *unc = le32_to_cpu(status->uncorrected_block_count);
46784+
46785+out:
46786+ kfree(cmd);
46787+ kfree(status);
46788+ return ret;
46789 }
46790
46791 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46792 u16 *strength)
46793 {
46794 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46795- struct dvbt_get_status_msg status;
46796- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46797+ struct dvbt_get_status_msg *status;
46798+ char *cmd;
46799 int ret;
46800
46801- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46802- sizeof(status), 0);
46803+ cmd = kmalloc(1, GFP_KERNEL);
46804+ if (cmd == NULL)
46805+ return -ENOMEM;
46806+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46807+ if (status == NULL) {
46808+ kfree(cmd);
46809+ return -ENOMEM;
46810+ }
46811+
46812+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46813+
46814+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46815+ sizeof(*status), 0);
46816 if (ret < 0) {
46817 err("cinergyt2_fe_read_signal_strength() Failed!"
46818 " (Error=%d)\n", ret);
46819- return ret;
46820+ goto out;
46821 }
46822- *strength = (0xffff - le16_to_cpu(status.gain));
46823+ *strength = (0xffff - le16_to_cpu(status->gain));
46824+
46825+out:
46826+ kfree(cmd);
46827+ kfree(status);
46828 return 0;
46829 }
46830
46831 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46832 {
46833 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46834- struct dvbt_get_status_msg status;
46835- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46836+ struct dvbt_get_status_msg *status;
46837+ char *cmd;
46838 int ret;
46839
46840- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46841- sizeof(status), 0);
46842+ cmd = kmalloc(1, GFP_KERNEL);
46843+ if (cmd == NULL)
46844+ return -ENOMEM;
46845+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46846+ if (status == NULL) {
46847+ kfree(cmd);
46848+ return -ENOMEM;
46849+ }
46850+
46851+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46852+
46853+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46854+ sizeof(*status), 0);
46855 if (ret < 0) {
46856 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46857- return ret;
46858+ goto out;
46859 }
46860- *snr = (status.snr << 8) | status.snr;
46861- return 0;
46862+ *snr = (status->snr << 8) | status->snr;
46863+
46864+out:
46865+ kfree(cmd);
46866+ kfree(status);
46867+ return ret;
46868 }
46869
46870 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46871@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46872 {
46873 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46874 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46875- struct dvbt_set_parameters_msg param;
46876- char result[2];
46877+ struct dvbt_set_parameters_msg *param;
46878+ char *result;
46879 int err;
46880
46881- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46882- param.tps = cpu_to_le16(compute_tps(fep));
46883- param.freq = cpu_to_le32(fep->frequency / 1000);
46884- param.flags = 0;
46885+ result = kmalloc(2, GFP_KERNEL);
46886+ if (result == NULL)
46887+ return -ENOMEM;
46888+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46889+ if (param == NULL) {
46890+ kfree(result);
46891+ return -ENOMEM;
46892+ }
46893+
46894+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46895+ param->tps = cpu_to_le16(compute_tps(fep));
46896+ param->freq = cpu_to_le32(fep->frequency / 1000);
46897+ param->flags = 0;
46898
46899 switch (fep->bandwidth_hz) {
46900 default:
46901 case 8000000:
46902- param.bandwidth = 8;
46903+ param->bandwidth = 8;
46904 break;
46905 case 7000000:
46906- param.bandwidth = 7;
46907+ param->bandwidth = 7;
46908 break;
46909 case 6000000:
46910- param.bandwidth = 6;
46911+ param->bandwidth = 6;
46912 break;
46913 }
46914
46915 err = dvb_usb_generic_rw(state->d,
46916- (char *)&param, sizeof(param),
46917- result, sizeof(result), 0);
46918+ (char *)param, sizeof(*param),
46919+ result, 2, 0);
46920 if (err < 0)
46921 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46922
46923- return (err < 0) ? err : 0;
46924+ kfree(result);
46925+ kfree(param);
46926+ return err;
46927 }
46928
46929 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46930diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46931index 733a7ff..f8b52e3 100644
46932--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46933+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46934@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46935
46936 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46937 {
46938- struct hexline hx;
46939- u8 reset;
46940+ struct hexline *hx;
46941+ u8 *reset;
46942 int ret,pos=0;
46943
46944+ reset = kmalloc(1, GFP_KERNEL);
46945+ if (reset == NULL)
46946+ return -ENOMEM;
46947+
46948+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46949+ if (hx == NULL) {
46950+ kfree(reset);
46951+ return -ENOMEM;
46952+ }
46953+
46954 /* stop the CPU */
46955- reset = 1;
46956- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46957+ reset[0] = 1;
46958+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46959 err("could not stop the USB controller CPU.");
46960
46961- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46962- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46963- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46964+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46965+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46966+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46967
46968- if (ret != hx.len) {
46969+ if (ret != hx->len) {
46970 err("error while transferring firmware "
46971 "(transferred size: %d, block size: %d)",
46972- ret,hx.len);
46973+ ret,hx->len);
46974 ret = -EINVAL;
46975 break;
46976 }
46977 }
46978 if (ret < 0) {
46979 err("firmware download failed at %d with %d",pos,ret);
46980+ kfree(reset);
46981+ kfree(hx);
46982 return ret;
46983 }
46984
46985 if (ret == 0) {
46986 /* restart the CPU */
46987- reset = 0;
46988- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46989+ reset[0] = 0;
46990+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46991 err("could not restart the USB controller CPU.");
46992 ret = -EINVAL;
46993 }
46994 } else
46995 ret = -EIO;
46996
46997+ kfree(reset);
46998+ kfree(hx);
46999+
47000 return ret;
47001 }
47002 EXPORT_SYMBOL(usb_cypress_load_firmware);
47003diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47004index 2add8c5..c33b854 100644
47005--- a/drivers/media/usb/dvb-usb/dw2102.c
47006+++ b/drivers/media/usb/dvb-usb/dw2102.c
47007@@ -118,7 +118,7 @@ struct su3000_state {
47008
47009 struct s6x0_state {
47010 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47011-};
47012+} __no_const;
47013
47014 /* debug */
47015 static int dvb_usb_dw2102_debug;
47016diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47017index 6b0b8b6b..4038398 100644
47018--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47019+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47020@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47021 static int technisat_usb2_i2c_access(struct usb_device *udev,
47022 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47023 {
47024- u8 b[64];
47025- int ret, actual_length;
47026+ u8 *b = kmalloc(64, GFP_KERNEL);
47027+ int ret, actual_length, error = 0;
47028+
47029+ if (b == NULL)
47030+ return -ENOMEM;
47031
47032 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47033 debug_dump(tx, txlen, deb_i2c);
47034@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47035
47036 if (ret < 0) {
47037 err("i2c-error: out failed %02x = %d", device_addr, ret);
47038- return -ENODEV;
47039+ error = -ENODEV;
47040+ goto out;
47041 }
47042
47043 ret = usb_bulk_msg(udev,
47044@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47045 b, 64, &actual_length, 1000);
47046 if (ret < 0) {
47047 err("i2c-error: in failed %02x = %d", device_addr, ret);
47048- return -ENODEV;
47049+ error = -ENODEV;
47050+ goto out;
47051 }
47052
47053 if (b[0] != I2C_STATUS_OK) {
47054@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47055 /* handle tuner-i2c-nak */
47056 if (!(b[0] == I2C_STATUS_NAK &&
47057 device_addr == 0x60
47058- /* && device_is_technisat_usb2 */))
47059- return -ENODEV;
47060+ /* && device_is_technisat_usb2 */)) {
47061+ error = -ENODEV;
47062+ goto out;
47063+ }
47064 }
47065
47066 deb_i2c("status: %d, ", b[0]);
47067@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47068
47069 deb_i2c("\n");
47070
47071- return 0;
47072+out:
47073+ kfree(b);
47074+ return error;
47075 }
47076
47077 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47078@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47079 {
47080 int ret;
47081
47082- u8 led[8] = {
47083- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47084- 0
47085- };
47086+ u8 *led = kzalloc(8, GFP_KERNEL);
47087+
47088+ if (led == NULL)
47089+ return -ENOMEM;
47090
47091 if (disable_led_control && state != TECH_LED_OFF)
47092 return 0;
47093
47094+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47095+
47096 switch (state) {
47097 case TECH_LED_ON:
47098 led[1] = 0x82;
47099@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47100 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47101 USB_TYPE_VENDOR | USB_DIR_OUT,
47102 0, 0,
47103- led, sizeof(led), 500);
47104+ led, 8, 500);
47105
47106 mutex_unlock(&d->i2c_mutex);
47107+
47108+ kfree(led);
47109+
47110 return ret;
47111 }
47112
47113 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47114 {
47115 int ret;
47116- u8 b = 0;
47117+ u8 *b = kzalloc(1, GFP_KERNEL);
47118+
47119+ if (b == NULL)
47120+ return -ENOMEM;
47121
47122 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47123 return -EAGAIN;
47124@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47125 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47126 USB_TYPE_VENDOR | USB_DIR_OUT,
47127 (red << 8) | green, 0,
47128- &b, 1, 500);
47129+ b, 1, 500);
47130
47131 mutex_unlock(&d->i2c_mutex);
47132
47133+ kfree(b);
47134+
47135 return ret;
47136 }
47137
47138@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47139 struct dvb_usb_device_description **desc, int *cold)
47140 {
47141 int ret;
47142- u8 version[3];
47143+ u8 *version = kmalloc(3, GFP_KERNEL);
47144
47145 /* first select the interface */
47146 if (usb_set_interface(udev, 0, 1) != 0)
47147@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47148
47149 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47150
47151+ if (version == NULL)
47152+ return 0;
47153+
47154 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47155 GET_VERSION_INFO_VENDOR_REQUEST,
47156 USB_TYPE_VENDOR | USB_DIR_IN,
47157 0, 0,
47158- version, sizeof(version), 500);
47159+ version, 3, 500);
47160
47161 if (ret < 0)
47162 *cold = 1;
47163@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47164 *cold = 0;
47165 }
47166
47167+ kfree(version);
47168+
47169 return 0;
47170 }
47171
47172@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47173
47174 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47175 {
47176- u8 buf[62], *b;
47177+ u8 *buf, *b;
47178 int ret;
47179 struct ir_raw_event ev;
47180
47181+ buf = kmalloc(62, GFP_KERNEL);
47182+
47183+ if (buf == NULL)
47184+ return -ENOMEM;
47185+
47186 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47187 buf[1] = 0x08;
47188 buf[2] = 0x8f;
47189@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47190 GET_IR_DATA_VENDOR_REQUEST,
47191 USB_TYPE_VENDOR | USB_DIR_IN,
47192 0x8080, 0,
47193- buf, sizeof(buf), 500);
47194+ buf, 62, 500);
47195
47196 unlock:
47197 mutex_unlock(&d->i2c_mutex);
47198
47199- if (ret < 0)
47200+ if (ret < 0) {
47201+ kfree(buf);
47202 return ret;
47203+ }
47204
47205- if (ret == 1)
47206+ if (ret == 1) {
47207+ kfree(buf);
47208 return 0; /* no key pressed */
47209+ }
47210
47211 /* decoding */
47212 b = buf+1;
47213@@ -653,6 +686,8 @@ unlock:
47214
47215 ir_raw_event_handle(d->rc_dev);
47216
47217+ kfree(buf);
47218+
47219 return 1;
47220 }
47221
47222diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47223index cca6c2f..77b9a18 100644
47224--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47225+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47226@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47227 __u32 reserved;
47228 };
47229
47230-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47231+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47232 enum v4l2_memory memory)
47233 {
47234 void __user *up_pln;
47235@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47236 return 0;
47237 }
47238
47239-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47240+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47241 enum v4l2_memory memory)
47242 {
47243 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47244@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47245 * by passing a very big num_planes value */
47246 uplane = compat_alloc_user_space(num_planes *
47247 sizeof(struct v4l2_plane));
47248- kp->m.planes = uplane;
47249+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47250
47251 while (--num_planes >= 0) {
47252 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47253@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47254 if (num_planes == 0)
47255 return 0;
47256
47257- uplane = kp->m.planes;
47258+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47259 if (get_user(p, &up->m.planes))
47260 return -EFAULT;
47261 uplane32 = compat_ptr(p);
47262@@ -562,7 +562,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47263 get_user(kp->flags, &up->flags) ||
47264 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47265 return -EFAULT;
47266- kp->base = compat_ptr(tmp);
47267+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47268 return 0;
47269 }
47270
47271@@ -667,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47272 n * sizeof(struct v4l2_ext_control32)))
47273 return -EFAULT;
47274 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47275- kp->controls = kcontrols;
47276+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47277 while (--n >= 0) {
47278 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47279 return -EFAULT;
47280@@ -689,7 +689,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47281 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47282 {
47283 struct v4l2_ext_control32 __user *ucontrols;
47284- struct v4l2_ext_control __user *kcontrols = kp->controls;
47285+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47286 int n = kp->count;
47287 compat_caddr_t p;
47288
47289@@ -783,7 +783,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47290 put_user(kp->start_block, &up->start_block) ||
47291 put_user(kp->blocks, &up->blocks) ||
47292 put_user(tmp, &up->edid) ||
47293- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47294+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47295 return -EFAULT;
47296 return 0;
47297 }
47298diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47299index 015f92a..59e311e 100644
47300--- a/drivers/media/v4l2-core/v4l2-device.c
47301+++ b/drivers/media/v4l2-core/v4l2-device.c
47302@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47303 EXPORT_SYMBOL_GPL(v4l2_device_put);
47304
47305 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47306- atomic_t *instance)
47307+ atomic_unchecked_t *instance)
47308 {
47309- int num = atomic_inc_return(instance) - 1;
47310+ int num = atomic_inc_return_unchecked(instance) - 1;
47311 int len = strlen(basename);
47312
47313 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47314diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47315index d15e167..337f374 100644
47316--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47317+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47318@@ -2142,7 +2142,8 @@ struct v4l2_ioctl_info {
47319 struct file *file, void *fh, void *p);
47320 } u;
47321 void (*debug)(const void *arg, bool write_only);
47322-};
47323+} __do_const;
47324+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47325
47326 /* This control needs a priority check */
47327 #define INFO_FL_PRIO (1 << 0)
47328@@ -2326,7 +2327,7 @@ static long __video_do_ioctl(struct file *file,
47329 struct video_device *vfd = video_devdata(file);
47330 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47331 bool write_only = false;
47332- struct v4l2_ioctl_info default_info;
47333+ v4l2_ioctl_info_no_const default_info;
47334 const struct v4l2_ioctl_info *info;
47335 void *fh = file->private_data;
47336 struct v4l2_fh *vfh = NULL;
47337@@ -2413,7 +2414,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47338 ret = -EINVAL;
47339 break;
47340 }
47341- *user_ptr = (void __user *)buf->m.planes;
47342+ *user_ptr = (void __force_user *)buf->m.planes;
47343 *kernel_ptr = (void **)&buf->m.planes;
47344 *array_size = sizeof(struct v4l2_plane) * buf->length;
47345 ret = 1;
47346@@ -2430,7 +2431,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47347 ret = -EINVAL;
47348 break;
47349 }
47350- *user_ptr = (void __user *)edid->edid;
47351+ *user_ptr = (void __force_user *)edid->edid;
47352 *kernel_ptr = (void **)&edid->edid;
47353 *array_size = edid->blocks * 128;
47354 ret = 1;
47355@@ -2448,7 +2449,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47356 ret = -EINVAL;
47357 break;
47358 }
47359- *user_ptr = (void __user *)ctrls->controls;
47360+ *user_ptr = (void __force_user *)ctrls->controls;
47361 *kernel_ptr = (void **)&ctrls->controls;
47362 *array_size = sizeof(struct v4l2_ext_control)
47363 * ctrls->count;
47364@@ -2549,7 +2550,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47365 }
47366
47367 if (has_array_args) {
47368- *kernel_ptr = (void __force *)user_ptr;
47369+ *kernel_ptr = (void __force_kernel *)user_ptr;
47370 if (copy_to_user(user_ptr, mbuf, array_size))
47371 err = -EFAULT;
47372 goto out_array_args;
47373diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47374index a896d94..a5d56b1 100644
47375--- a/drivers/message/fusion/mptbase.c
47376+++ b/drivers/message/fusion/mptbase.c
47377@@ -6752,8 +6752,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47378 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47379 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47380
47381+#ifdef CONFIG_GRKERNSEC_HIDESYM
47382+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47383+#else
47384 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47385 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47386+#endif
47387+
47388 /*
47389 * Rounding UP to nearest 4-kB boundary here...
47390 */
47391@@ -6766,7 +6771,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47392 ioc->facts.GlobalCredits);
47393
47394 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47395+#ifdef CONFIG_GRKERNSEC_HIDESYM
47396+ NULL, NULL);
47397+#else
47398 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47399+#endif
47400 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47401 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47402 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47403diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47404index 0707fa2..70ca794 100644
47405--- a/drivers/message/fusion/mptsas.c
47406+++ b/drivers/message/fusion/mptsas.c
47407@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47408 return 0;
47409 }
47410
47411+static inline void
47412+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47413+{
47414+ if (phy_info->port_details) {
47415+ phy_info->port_details->rphy = rphy;
47416+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47417+ ioc->name, rphy));
47418+ }
47419+
47420+ if (rphy) {
47421+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47422+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47423+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47424+ ioc->name, rphy, rphy->dev.release));
47425+ }
47426+}
47427+
47428 /* no mutex */
47429 static void
47430 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47431@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47432 return NULL;
47433 }
47434
47435-static inline void
47436-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47437-{
47438- if (phy_info->port_details) {
47439- phy_info->port_details->rphy = rphy;
47440- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47441- ioc->name, rphy));
47442- }
47443-
47444- if (rphy) {
47445- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47446- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47447- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47448- ioc->name, rphy, rphy->dev.release));
47449- }
47450-}
47451-
47452 static inline struct sas_port *
47453 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47454 {
47455diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47456index b7d87cd..3fb36da 100644
47457--- a/drivers/message/i2o/i2o_proc.c
47458+++ b/drivers/message/i2o/i2o_proc.c
47459@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47460 "Array Controller Device"
47461 };
47462
47463-static char *chtostr(char *tmp, u8 *chars, int n)
47464-{
47465- tmp[0] = 0;
47466- return strncat(tmp, (char *)chars, n);
47467-}
47468-
47469 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47470 char *group)
47471 {
47472@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47473 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47474 {
47475 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47476- static u32 work32[5];
47477- static u8 *work8 = (u8 *) work32;
47478- static u16 *work16 = (u16 *) work32;
47479+ u32 work32[5];
47480+ u8 *work8 = (u8 *) work32;
47481+ u16 *work16 = (u16 *) work32;
47482 int token;
47483 u32 hwcap;
47484
47485@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47486 } *result;
47487
47488 i2o_exec_execute_ddm_table ddm_table;
47489- char tmp[28 + 1];
47490
47491 result = kmalloc(sizeof(*result), GFP_KERNEL);
47492 if (!result)
47493@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47494
47495 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47496 seq_printf(seq, "%-#8x", ddm_table.module_id);
47497- seq_printf(seq, "%-29s",
47498- chtostr(tmp, ddm_table.module_name_version, 28));
47499+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47500 seq_printf(seq, "%9d ", ddm_table.data_size);
47501 seq_printf(seq, "%8d", ddm_table.code_size);
47502
47503@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47504
47505 i2o_driver_result_table *result;
47506 i2o_driver_store_table *dst;
47507- char tmp[28 + 1];
47508
47509 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47510 if (result == NULL)
47511@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47512
47513 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47514 seq_printf(seq, "%-#8x", dst->module_id);
47515- seq_printf(seq, "%-29s",
47516- chtostr(tmp, dst->module_name_version, 28));
47517- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47518+ seq_printf(seq, "%-.28s", dst->module_name_version);
47519+ seq_printf(seq, "%-.8s", dst->date);
47520 seq_printf(seq, "%8d ", dst->module_size);
47521 seq_printf(seq, "%8d ", dst->mpb_size);
47522 seq_printf(seq, "0x%04x", dst->module_flags);
47523@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47524 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47525 {
47526 struct i2o_device *d = (struct i2o_device *)seq->private;
47527- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47528+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47529 // == (allow) 512d bytes (max)
47530- static u16 *work16 = (u16 *) work32;
47531+ u16 *work16 = (u16 *) work32;
47532 int token;
47533- char tmp[16 + 1];
47534
47535 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47536
47537@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47538 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47539 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47540 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47541- seq_printf(seq, "Vendor info : %s\n",
47542- chtostr(tmp, (u8 *) (work32 + 2), 16));
47543- seq_printf(seq, "Product info : %s\n",
47544- chtostr(tmp, (u8 *) (work32 + 6), 16));
47545- seq_printf(seq, "Description : %s\n",
47546- chtostr(tmp, (u8 *) (work32 + 10), 16));
47547- seq_printf(seq, "Product rev. : %s\n",
47548- chtostr(tmp, (u8 *) (work32 + 14), 8));
47549+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47550+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47551+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47552+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47553
47554 seq_printf(seq, "Serial number : ");
47555 print_serial_number(seq, (u8 *) (work32 + 16),
47556@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47557 u8 pad[256]; // allow up to 256 byte (max) serial number
47558 } result;
47559
47560- char tmp[24 + 1];
47561-
47562 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47563
47564 if (token < 0) {
47565@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47566 }
47567
47568 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47569- seq_printf(seq, "Module name : %s\n",
47570- chtostr(tmp, result.module_name, 24));
47571- seq_printf(seq, "Module revision : %s\n",
47572- chtostr(tmp, result.module_rev, 8));
47573+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47574+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47575
47576 seq_printf(seq, "Serial number : ");
47577 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47578@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47579 u8 instance_number[4];
47580 } result;
47581
47582- char tmp[64 + 1];
47583-
47584 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47585
47586 if (token < 0) {
47587@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47588 return 0;
47589 }
47590
47591- seq_printf(seq, "Device name : %s\n",
47592- chtostr(tmp, result.device_name, 64));
47593- seq_printf(seq, "Service name : %s\n",
47594- chtostr(tmp, result.service_name, 64));
47595- seq_printf(seq, "Physical name : %s\n",
47596- chtostr(tmp, result.physical_location, 64));
47597- seq_printf(seq, "Instance number : %s\n",
47598- chtostr(tmp, result.instance_number, 4));
47599+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47600+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47601+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47602+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47603
47604 return 0;
47605 }
47606@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47607 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47608 {
47609 struct i2o_device *d = (struct i2o_device *)seq->private;
47610- static u32 work32[12];
47611- static u16 *work16 = (u16 *) work32;
47612- static u8 *work8 = (u8 *) work32;
47613+ u32 work32[12];
47614+ u16 *work16 = (u16 *) work32;
47615+ u8 *work8 = (u8 *) work32;
47616 int token;
47617
47618 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
47619diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47620index 92752fb..a7494f6 100644
47621--- a/drivers/message/i2o/iop.c
47622+++ b/drivers/message/i2o/iop.c
47623@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47624
47625 spin_lock_irqsave(&c->context_list_lock, flags);
47626
47627- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47628- atomic_inc(&c->context_list_counter);
47629+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47630+ atomic_inc_unchecked(&c->context_list_counter);
47631
47632- entry->context = atomic_read(&c->context_list_counter);
47633+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47634
47635 list_add(&entry->list, &c->context_list);
47636
47637@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47638
47639 #if BITS_PER_LONG == 64
47640 spin_lock_init(&c->context_list_lock);
47641- atomic_set(&c->context_list_counter, 0);
47642+ atomic_set_unchecked(&c->context_list_counter, 0);
47643 INIT_LIST_HEAD(&c->context_list);
47644 #endif
47645
47646diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47647index b2c7e3b..85aa4764 100644
47648--- a/drivers/mfd/ab8500-debugfs.c
47649+++ b/drivers/mfd/ab8500-debugfs.c
47650@@ -100,7 +100,7 @@ static int irq_last;
47651 static u32 *irq_count;
47652 static int num_irqs;
47653
47654-static struct device_attribute **dev_attr;
47655+static device_attribute_no_const **dev_attr;
47656 static char **event_name;
47657
47658 static u8 avg_sample = SAMPLE_16;
47659diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47660index ecbe78e..b2ca870 100644
47661--- a/drivers/mfd/max8925-i2c.c
47662+++ b/drivers/mfd/max8925-i2c.c
47663@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47664 const struct i2c_device_id *id)
47665 {
47666 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47667- static struct max8925_chip *chip;
47668+ struct max8925_chip *chip;
47669 struct device_node *node = client->dev.of_node;
47670
47671 if (node && !pdata) {
47672diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47673index f243e75..322176c 100644
47674--- a/drivers/mfd/tps65910.c
47675+++ b/drivers/mfd/tps65910.c
47676@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47677 struct tps65910_platform_data *pdata)
47678 {
47679 int ret = 0;
47680- static struct regmap_irq_chip *tps6591x_irqs_chip;
47681+ struct regmap_irq_chip *tps6591x_irqs_chip;
47682
47683 if (!irq) {
47684 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47685diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47686index b1dabba..24a88f2 100644
47687--- a/drivers/mfd/twl4030-irq.c
47688+++ b/drivers/mfd/twl4030-irq.c
47689@@ -34,6 +34,7 @@
47690 #include <linux/of.h>
47691 #include <linux/irqdomain.h>
47692 #include <linux/i2c/twl.h>
47693+#include <asm/pgtable.h>
47694
47695 #include "twl-core.h"
47696
47697@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47698 * Install an irq handler for each of the SIH modules;
47699 * clone dummy irq_chip since PIH can't *do* anything
47700 */
47701- twl4030_irq_chip = dummy_irq_chip;
47702- twl4030_irq_chip.name = "twl4030";
47703+ pax_open_kernel();
47704+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47705+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47706
47707- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47708+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47709+ pax_close_kernel();
47710
47711 for (i = irq_base; i < irq_end; i++) {
47712 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47713diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47714index 464419b..64bae8d 100644
47715--- a/drivers/misc/c2port/core.c
47716+++ b/drivers/misc/c2port/core.c
47717@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47718 goto error_idr_alloc;
47719 c2dev->id = ret;
47720
47721- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47722+ pax_open_kernel();
47723+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47724+ pax_close_kernel();
47725
47726 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47727 "c2port%d", c2dev->id);
47728diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47729index 3f2b625..945e179 100644
47730--- a/drivers/misc/eeprom/sunxi_sid.c
47731+++ b/drivers/misc/eeprom/sunxi_sid.c
47732@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47733
47734 platform_set_drvdata(pdev, sid_data);
47735
47736- sid_bin_attr.size = sid_data->keysize;
47737+ pax_open_kernel();
47738+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47739+ pax_close_kernel();
47740 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47741 return -ENODEV;
47742
47743diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47744index 36f5d52..32311c3 100644
47745--- a/drivers/misc/kgdbts.c
47746+++ b/drivers/misc/kgdbts.c
47747@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47748 char before[BREAK_INSTR_SIZE];
47749 char after[BREAK_INSTR_SIZE];
47750
47751- probe_kernel_read(before, (char *)kgdbts_break_test,
47752+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47753 BREAK_INSTR_SIZE);
47754 init_simple_test();
47755 ts.tst = plant_and_detach_test;
47756@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47757 /* Activate test with initial breakpoint */
47758 if (!is_early)
47759 kgdb_breakpoint();
47760- probe_kernel_read(after, (char *)kgdbts_break_test,
47761+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47762 BREAK_INSTR_SIZE);
47763 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47764 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47765diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47766index 3ef4627..8d00486 100644
47767--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47768+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47769@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47770 * the lid is closed. This leads to interrupts as soon as a little move
47771 * is done.
47772 */
47773- atomic_inc(&lis3->count);
47774+ atomic_inc_unchecked(&lis3->count);
47775
47776 wake_up_interruptible(&lis3->misc_wait);
47777 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47778@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47779 if (lis3->pm_dev)
47780 pm_runtime_get_sync(lis3->pm_dev);
47781
47782- atomic_set(&lis3->count, 0);
47783+ atomic_set_unchecked(&lis3->count, 0);
47784 return 0;
47785 }
47786
47787@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47788 add_wait_queue(&lis3->misc_wait, &wait);
47789 while (true) {
47790 set_current_state(TASK_INTERRUPTIBLE);
47791- data = atomic_xchg(&lis3->count, 0);
47792+ data = atomic_xchg_unchecked(&lis3->count, 0);
47793 if (data)
47794 break;
47795
47796@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47797 struct lis3lv02d, miscdev);
47798
47799 poll_wait(file, &lis3->misc_wait, wait);
47800- if (atomic_read(&lis3->count))
47801+ if (atomic_read_unchecked(&lis3->count))
47802 return POLLIN | POLLRDNORM;
47803 return 0;
47804 }
47805diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47806index c439c82..1f20f57 100644
47807--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47808+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47809@@ -297,7 +297,7 @@ struct lis3lv02d {
47810 struct input_polled_dev *idev; /* input device */
47811 struct platform_device *pdev; /* platform device */
47812 struct regulator_bulk_data regulators[2];
47813- atomic_t count; /* interrupt count after last read */
47814+ atomic_unchecked_t count; /* interrupt count after last read */
47815 union axis_conversion ac; /* hw -> logical axis */
47816 int mapped_btns[3];
47817
47818diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47819index 2f30bad..c4c13d0 100644
47820--- a/drivers/misc/sgi-gru/gruhandles.c
47821+++ b/drivers/misc/sgi-gru/gruhandles.c
47822@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47823 unsigned long nsec;
47824
47825 nsec = CLKS2NSEC(clks);
47826- atomic_long_inc(&mcs_op_statistics[op].count);
47827- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47828+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47829+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47830 if (mcs_op_statistics[op].max < nsec)
47831 mcs_op_statistics[op].max = nsec;
47832 }
47833diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47834index 4f76359..cdfcb2e 100644
47835--- a/drivers/misc/sgi-gru/gruprocfs.c
47836+++ b/drivers/misc/sgi-gru/gruprocfs.c
47837@@ -32,9 +32,9 @@
47838
47839 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47840
47841-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47842+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47843 {
47844- unsigned long val = atomic_long_read(v);
47845+ unsigned long val = atomic_long_read_unchecked(v);
47846
47847 seq_printf(s, "%16lu %s\n", val, id);
47848 }
47849@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47850
47851 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47852 for (op = 0; op < mcsop_last; op++) {
47853- count = atomic_long_read(&mcs_op_statistics[op].count);
47854- total = atomic_long_read(&mcs_op_statistics[op].total);
47855+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47856+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47857 max = mcs_op_statistics[op].max;
47858 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47859 count ? total / count : 0, max);
47860diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47861index 5c3ce24..4915ccb 100644
47862--- a/drivers/misc/sgi-gru/grutables.h
47863+++ b/drivers/misc/sgi-gru/grutables.h
47864@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47865 * GRU statistics.
47866 */
47867 struct gru_stats_s {
47868- atomic_long_t vdata_alloc;
47869- atomic_long_t vdata_free;
47870- atomic_long_t gts_alloc;
47871- atomic_long_t gts_free;
47872- atomic_long_t gms_alloc;
47873- atomic_long_t gms_free;
47874- atomic_long_t gts_double_allocate;
47875- atomic_long_t assign_context;
47876- atomic_long_t assign_context_failed;
47877- atomic_long_t free_context;
47878- atomic_long_t load_user_context;
47879- atomic_long_t load_kernel_context;
47880- atomic_long_t lock_kernel_context;
47881- atomic_long_t unlock_kernel_context;
47882- atomic_long_t steal_user_context;
47883- atomic_long_t steal_kernel_context;
47884- atomic_long_t steal_context_failed;
47885- atomic_long_t nopfn;
47886- atomic_long_t asid_new;
47887- atomic_long_t asid_next;
47888- atomic_long_t asid_wrap;
47889- atomic_long_t asid_reuse;
47890- atomic_long_t intr;
47891- atomic_long_t intr_cbr;
47892- atomic_long_t intr_tfh;
47893- atomic_long_t intr_spurious;
47894- atomic_long_t intr_mm_lock_failed;
47895- atomic_long_t call_os;
47896- atomic_long_t call_os_wait_queue;
47897- atomic_long_t user_flush_tlb;
47898- atomic_long_t user_unload_context;
47899- atomic_long_t user_exception;
47900- atomic_long_t set_context_option;
47901- atomic_long_t check_context_retarget_intr;
47902- atomic_long_t check_context_unload;
47903- atomic_long_t tlb_dropin;
47904- atomic_long_t tlb_preload_page;
47905- atomic_long_t tlb_dropin_fail_no_asid;
47906- atomic_long_t tlb_dropin_fail_upm;
47907- atomic_long_t tlb_dropin_fail_invalid;
47908- atomic_long_t tlb_dropin_fail_range_active;
47909- atomic_long_t tlb_dropin_fail_idle;
47910- atomic_long_t tlb_dropin_fail_fmm;
47911- atomic_long_t tlb_dropin_fail_no_exception;
47912- atomic_long_t tfh_stale_on_fault;
47913- atomic_long_t mmu_invalidate_range;
47914- atomic_long_t mmu_invalidate_page;
47915- atomic_long_t flush_tlb;
47916- atomic_long_t flush_tlb_gru;
47917- atomic_long_t flush_tlb_gru_tgh;
47918- atomic_long_t flush_tlb_gru_zero_asid;
47919+ atomic_long_unchecked_t vdata_alloc;
47920+ atomic_long_unchecked_t vdata_free;
47921+ atomic_long_unchecked_t gts_alloc;
47922+ atomic_long_unchecked_t gts_free;
47923+ atomic_long_unchecked_t gms_alloc;
47924+ atomic_long_unchecked_t gms_free;
47925+ atomic_long_unchecked_t gts_double_allocate;
47926+ atomic_long_unchecked_t assign_context;
47927+ atomic_long_unchecked_t assign_context_failed;
47928+ atomic_long_unchecked_t free_context;
47929+ atomic_long_unchecked_t load_user_context;
47930+ atomic_long_unchecked_t load_kernel_context;
47931+ atomic_long_unchecked_t lock_kernel_context;
47932+ atomic_long_unchecked_t unlock_kernel_context;
47933+ atomic_long_unchecked_t steal_user_context;
47934+ atomic_long_unchecked_t steal_kernel_context;
47935+ atomic_long_unchecked_t steal_context_failed;
47936+ atomic_long_unchecked_t nopfn;
47937+ atomic_long_unchecked_t asid_new;
47938+ atomic_long_unchecked_t asid_next;
47939+ atomic_long_unchecked_t asid_wrap;
47940+ atomic_long_unchecked_t asid_reuse;
47941+ atomic_long_unchecked_t intr;
47942+ atomic_long_unchecked_t intr_cbr;
47943+ atomic_long_unchecked_t intr_tfh;
47944+ atomic_long_unchecked_t intr_spurious;
47945+ atomic_long_unchecked_t intr_mm_lock_failed;
47946+ atomic_long_unchecked_t call_os;
47947+ atomic_long_unchecked_t call_os_wait_queue;
47948+ atomic_long_unchecked_t user_flush_tlb;
47949+ atomic_long_unchecked_t user_unload_context;
47950+ atomic_long_unchecked_t user_exception;
47951+ atomic_long_unchecked_t set_context_option;
47952+ atomic_long_unchecked_t check_context_retarget_intr;
47953+ atomic_long_unchecked_t check_context_unload;
47954+ atomic_long_unchecked_t tlb_dropin;
47955+ atomic_long_unchecked_t tlb_preload_page;
47956+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47957+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47958+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47959+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47960+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47961+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47962+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47963+ atomic_long_unchecked_t tfh_stale_on_fault;
47964+ atomic_long_unchecked_t mmu_invalidate_range;
47965+ atomic_long_unchecked_t mmu_invalidate_page;
47966+ atomic_long_unchecked_t flush_tlb;
47967+ atomic_long_unchecked_t flush_tlb_gru;
47968+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47969+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47970
47971- atomic_long_t copy_gpa;
47972- atomic_long_t read_gpa;
47973+ atomic_long_unchecked_t copy_gpa;
47974+ atomic_long_unchecked_t read_gpa;
47975
47976- atomic_long_t mesq_receive;
47977- atomic_long_t mesq_receive_none;
47978- atomic_long_t mesq_send;
47979- atomic_long_t mesq_send_failed;
47980- atomic_long_t mesq_noop;
47981- atomic_long_t mesq_send_unexpected_error;
47982- atomic_long_t mesq_send_lb_overflow;
47983- atomic_long_t mesq_send_qlimit_reached;
47984- atomic_long_t mesq_send_amo_nacked;
47985- atomic_long_t mesq_send_put_nacked;
47986- atomic_long_t mesq_page_overflow;
47987- atomic_long_t mesq_qf_locked;
47988- atomic_long_t mesq_qf_noop_not_full;
47989- atomic_long_t mesq_qf_switch_head_failed;
47990- atomic_long_t mesq_qf_unexpected_error;
47991- atomic_long_t mesq_noop_unexpected_error;
47992- atomic_long_t mesq_noop_lb_overflow;
47993- atomic_long_t mesq_noop_qlimit_reached;
47994- atomic_long_t mesq_noop_amo_nacked;
47995- atomic_long_t mesq_noop_put_nacked;
47996- atomic_long_t mesq_noop_page_overflow;
47997+ atomic_long_unchecked_t mesq_receive;
47998+ atomic_long_unchecked_t mesq_receive_none;
47999+ atomic_long_unchecked_t mesq_send;
48000+ atomic_long_unchecked_t mesq_send_failed;
48001+ atomic_long_unchecked_t mesq_noop;
48002+ atomic_long_unchecked_t mesq_send_unexpected_error;
48003+ atomic_long_unchecked_t mesq_send_lb_overflow;
48004+ atomic_long_unchecked_t mesq_send_qlimit_reached;
48005+ atomic_long_unchecked_t mesq_send_amo_nacked;
48006+ atomic_long_unchecked_t mesq_send_put_nacked;
48007+ atomic_long_unchecked_t mesq_page_overflow;
48008+ atomic_long_unchecked_t mesq_qf_locked;
48009+ atomic_long_unchecked_t mesq_qf_noop_not_full;
48010+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
48011+ atomic_long_unchecked_t mesq_qf_unexpected_error;
48012+ atomic_long_unchecked_t mesq_noop_unexpected_error;
48013+ atomic_long_unchecked_t mesq_noop_lb_overflow;
48014+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
48015+ atomic_long_unchecked_t mesq_noop_amo_nacked;
48016+ atomic_long_unchecked_t mesq_noop_put_nacked;
48017+ atomic_long_unchecked_t mesq_noop_page_overflow;
48018
48019 };
48020
48021@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
48022 tghop_invalidate, mcsop_last};
48023
48024 struct mcs_op_statistic {
48025- atomic_long_t count;
48026- atomic_long_t total;
48027+ atomic_long_unchecked_t count;
48028+ atomic_long_unchecked_t total;
48029 unsigned long max;
48030 };
48031
48032@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
48033
48034 #define STAT(id) do { \
48035 if (gru_options & OPT_STATS) \
48036- atomic_long_inc(&gru_stats.id); \
48037+ atomic_long_inc_unchecked(&gru_stats.id); \
48038 } while (0)
48039
48040 #ifdef CONFIG_SGI_GRU_DEBUG
48041diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
48042index c862cd4..0d176fe 100644
48043--- a/drivers/misc/sgi-xp/xp.h
48044+++ b/drivers/misc/sgi-xp/xp.h
48045@@ -288,7 +288,7 @@ struct xpc_interface {
48046 xpc_notify_func, void *);
48047 void (*received) (short, int, void *);
48048 enum xp_retval (*partid_to_nasids) (short, void *);
48049-};
48050+} __no_const;
48051
48052 extern struct xpc_interface xpc_interface;
48053
48054diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
48055index 01be66d..e3a0c7e 100644
48056--- a/drivers/misc/sgi-xp/xp_main.c
48057+++ b/drivers/misc/sgi-xp/xp_main.c
48058@@ -78,13 +78,13 @@ xpc_notloaded(void)
48059 }
48060
48061 struct xpc_interface xpc_interface = {
48062- (void (*)(int))xpc_notloaded,
48063- (void (*)(int))xpc_notloaded,
48064- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48065- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48066+ .connect = (void (*)(int))xpc_notloaded,
48067+ .disconnect = (void (*)(int))xpc_notloaded,
48068+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48069+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48070 void *))xpc_notloaded,
48071- (void (*)(short, int, void *))xpc_notloaded,
48072- (enum xp_retval(*)(short, void *))xpc_notloaded
48073+ .received = (void (*)(short, int, void *))xpc_notloaded,
48074+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
48075 };
48076 EXPORT_SYMBOL_GPL(xpc_interface);
48077
48078diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48079index b94d5f7..7f494c5 100644
48080--- a/drivers/misc/sgi-xp/xpc.h
48081+++ b/drivers/misc/sgi-xp/xpc.h
48082@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48083 void (*received_payload) (struct xpc_channel *, void *);
48084 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48085 };
48086+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48087
48088 /* struct xpc_partition act_state values (for XPC HB) */
48089
48090@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48091 /* found in xpc_main.c */
48092 extern struct device *xpc_part;
48093 extern struct device *xpc_chan;
48094-extern struct xpc_arch_operations xpc_arch_ops;
48095+extern xpc_arch_operations_no_const xpc_arch_ops;
48096 extern int xpc_disengage_timelimit;
48097 extern int xpc_disengage_timedout;
48098 extern int xpc_activate_IRQ_rcvd;
48099diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48100index 82dc574..8539ab2 100644
48101--- a/drivers/misc/sgi-xp/xpc_main.c
48102+++ b/drivers/misc/sgi-xp/xpc_main.c
48103@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48104 .notifier_call = xpc_system_die,
48105 };
48106
48107-struct xpc_arch_operations xpc_arch_ops;
48108+xpc_arch_operations_no_const xpc_arch_ops;
48109
48110 /*
48111 * Timer function to enforce the timelimit on the partition disengage.
48112@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48113
48114 if (((die_args->trapnr == X86_TRAP_MF) ||
48115 (die_args->trapnr == X86_TRAP_XF)) &&
48116- !user_mode_vm(die_args->regs))
48117+ !user_mode(die_args->regs))
48118 xpc_die_deactivate();
48119
48120 break;
48121diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48122index ede41f0..744fbd9 100644
48123--- a/drivers/mmc/card/block.c
48124+++ b/drivers/mmc/card/block.c
48125@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48126 if (idata->ic.postsleep_min_us)
48127 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48128
48129- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48130+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48131 err = -EFAULT;
48132 goto cmd_rel_host;
48133 }
48134diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48135index f51b5ba..86614a7 100644
48136--- a/drivers/mmc/core/mmc_ops.c
48137+++ b/drivers/mmc/core/mmc_ops.c
48138@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48139 void *data_buf;
48140 int is_on_stack;
48141
48142- is_on_stack = object_is_on_stack(buf);
48143+ is_on_stack = object_starts_on_stack(buf);
48144 if (is_on_stack) {
48145 /*
48146 * dma onto stack is unsafe/nonportable, but callers to this
48147diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48148index 08fd956..370487a 100644
48149--- a/drivers/mmc/host/dw_mmc.h
48150+++ b/drivers/mmc/host/dw_mmc.h
48151@@ -262,5 +262,5 @@ struct dw_mci_drv_data {
48152 int (*parse_dt)(struct dw_mci *host);
48153 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48154 struct dw_mci_tuning_data *tuning_data);
48155-};
48156+} __do_const;
48157 #endif /* _DW_MMC_H_ */
48158diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48159index e4d4707..28262a3 100644
48160--- a/drivers/mmc/host/mmci.c
48161+++ b/drivers/mmc/host/mmci.c
48162@@ -1612,7 +1612,9 @@ static int mmci_probe(struct amba_device *dev,
48163 mmc->caps |= MMC_CAP_CMD23;
48164
48165 if (variant->busy_detect) {
48166- mmci_ops.card_busy = mmci_card_busy;
48167+ pax_open_kernel();
48168+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48169+ pax_close_kernel();
48170 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48171 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48172 mmc->max_busy_timeout = 0;
48173diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48174index ccec0e3..199f9ce 100644
48175--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48176+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48177@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48178 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48179 }
48180
48181- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48182- sdhci_esdhc_ops.platform_execute_tuning =
48183+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48184+ pax_open_kernel();
48185+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48186 esdhc_executing_tuning;
48187+ pax_close_kernel();
48188+ }
48189
48190 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48191 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48192diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48193index fa5954a..56840e5 100644
48194--- a/drivers/mmc/host/sdhci-s3c.c
48195+++ b/drivers/mmc/host/sdhci-s3c.c
48196@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48197 * we can use overriding functions instead of default.
48198 */
48199 if (sc->no_divider) {
48200- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48201- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48202- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48203+ pax_open_kernel();
48204+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48205+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48206+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48207+ pax_close_kernel();
48208 }
48209
48210 /* It supports additional host capabilities if needed */
48211diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48212index 423666b..81ff5eb 100644
48213--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48214+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48215@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48216 size_t totlen = 0, thislen;
48217 int ret = 0;
48218 size_t buflen = 0;
48219- static char *buffer;
48220+ char *buffer;
48221
48222 if (!ECCBUF_SIZE) {
48223 /* We should fall back to a general writev implementation.
48224diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48225index 0b071a3..8ec3d5b 100644
48226--- a/drivers/mtd/nand/denali.c
48227+++ b/drivers/mtd/nand/denali.c
48228@@ -24,6 +24,7 @@
48229 #include <linux/slab.h>
48230 #include <linux/mtd/mtd.h>
48231 #include <linux/module.h>
48232+#include <linux/slab.h>
48233
48234 #include "denali.h"
48235
48236diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48237index 959cb9b..8520fe5 100644
48238--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48239+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48240@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48241
48242 /* first try to map the upper buffer directly */
48243 if (virt_addr_valid(this->upper_buf) &&
48244- !object_is_on_stack(this->upper_buf)) {
48245+ !object_starts_on_stack(this->upper_buf)) {
48246 sg_init_one(sgl, this->upper_buf, this->upper_len);
48247 ret = dma_map_sg(this->dev, sgl, 1, dr);
48248 if (ret == 0)
48249diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48250index 51b9d6a..52af9a7 100644
48251--- a/drivers/mtd/nftlmount.c
48252+++ b/drivers/mtd/nftlmount.c
48253@@ -24,6 +24,7 @@
48254 #include <asm/errno.h>
48255 #include <linux/delay.h>
48256 #include <linux/slab.h>
48257+#include <linux/sched.h>
48258 #include <linux/mtd/mtd.h>
48259 #include <linux/mtd/nand.h>
48260 #include <linux/mtd/nftl.h>
48261diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48262index cf49c22..971b133 100644
48263--- a/drivers/mtd/sm_ftl.c
48264+++ b/drivers/mtd/sm_ftl.c
48265@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48266 #define SM_CIS_VENDOR_OFFSET 0x59
48267 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48268 {
48269- struct attribute_group *attr_group;
48270+ attribute_group_no_const *attr_group;
48271 struct attribute **attributes;
48272 struct sm_sysfs_attribute *vendor_attribute;
48273 char *vendor;
48274diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48275index d163e11..f517018 100644
48276--- a/drivers/net/bonding/bond_netlink.c
48277+++ b/drivers/net/bonding/bond_netlink.c
48278@@ -548,7 +548,7 @@ nla_put_failure:
48279 return -EMSGSIZE;
48280 }
48281
48282-struct rtnl_link_ops bond_link_ops __read_mostly = {
48283+struct rtnl_link_ops bond_link_ops = {
48284 .kind = "bond",
48285 .priv_size = sizeof(struct bonding),
48286 .setup = bond_setup,
48287diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48288index 4168822..f38eeddf 100644
48289--- a/drivers/net/can/Kconfig
48290+++ b/drivers/net/can/Kconfig
48291@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48292
48293 config CAN_FLEXCAN
48294 tristate "Support for Freescale FLEXCAN based chips"
48295- depends on ARM || PPC
48296+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48297 ---help---
48298 Say Y here if you want to support for Freescale FlexCAN.
48299
48300diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48301index 1d162cc..b546a75 100644
48302--- a/drivers/net/ethernet/8390/ax88796.c
48303+++ b/drivers/net/ethernet/8390/ax88796.c
48304@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48305 if (ax->plat->reg_offsets)
48306 ei_local->reg_offset = ax->plat->reg_offsets;
48307 else {
48308+ resource_size_t _mem_size = mem_size;
48309+ do_div(_mem_size, 0x18);
48310 ei_local->reg_offset = ax->reg_offsets;
48311 for (ret = 0; ret < 0x18; ret++)
48312- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48313+ ax->reg_offsets[ret] = _mem_size * ret;
48314 }
48315
48316 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48317diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48318index 7330681..7e9e463 100644
48319--- a/drivers/net/ethernet/altera/altera_tse_main.c
48320+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48321@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48322 return 0;
48323 }
48324
48325-static struct net_device_ops altera_tse_netdev_ops = {
48326+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48327 .ndo_open = tse_open,
48328 .ndo_stop = tse_shutdown,
48329 .ndo_start_xmit = tse_start_xmit,
48330@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48331 ndev->netdev_ops = &altera_tse_netdev_ops;
48332 altera_tse_set_ethtool_ops(ndev);
48333
48334+ pax_open_kernel();
48335 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48336
48337 if (priv->hash_filter)
48338 altera_tse_netdev_ops.ndo_set_rx_mode =
48339 tse_set_rx_mode_hashfilter;
48340+ pax_close_kernel();
48341
48342 /* Scatter/gather IO is not supported,
48343 * so it is turned off
48344diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48345index cc25a3a..c8d72d3 100644
48346--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48347+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48348@@ -1083,14 +1083,14 @@ do { \
48349 * operations, everything works on mask values.
48350 */
48351 #define XMDIO_READ(_pdata, _mmd, _reg) \
48352- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48353+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48354 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48355
48356 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48357 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48358
48359 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48360- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48361+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48362 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48363
48364 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48365diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48366index 7d6a49b..e6d403b 100644
48367--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48368+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48369@@ -188,7 +188,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48370
48371 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48372
48373- pdata->hw_if.config_dcb_tc(pdata);
48374+ pdata->hw_if->config_dcb_tc(pdata);
48375
48376 return 0;
48377 }
48378@@ -227,7 +227,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48379
48380 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48381
48382- pdata->hw_if.config_dcb_pfc(pdata);
48383+ pdata->hw_if->config_dcb_pfc(pdata);
48384
48385 return 0;
48386 }
48387diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48388index 1c5d62e..8e14d54 100644
48389--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48390+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48391@@ -236,7 +236,7 @@ err_ring:
48392
48393 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48394 {
48395- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48396+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48397 struct xgbe_channel *channel;
48398 struct xgbe_ring *ring;
48399 struct xgbe_ring_data *rdata;
48400@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48401
48402 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48403 {
48404- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48405+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48406 struct xgbe_channel *channel;
48407 struct xgbe_ring *ring;
48408 struct xgbe_ring_desc *rdesc;
48409@@ -506,7 +506,7 @@ err_out:
48410 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48411 {
48412 struct xgbe_prv_data *pdata = channel->pdata;
48413- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48414+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48415 struct xgbe_ring *ring = channel->rx_ring;
48416 struct xgbe_ring_data *rdata;
48417 struct sk_buff *skb = NULL;
48418@@ -550,17 +550,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48419 DBGPR("<--xgbe_realloc_skb\n");
48420 }
48421
48422-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48423-{
48424- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48425-
48426- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48427- desc_if->free_ring_resources = xgbe_free_ring_resources;
48428- desc_if->map_tx_skb = xgbe_map_tx_skb;
48429- desc_if->realloc_skb = xgbe_realloc_skb;
48430- desc_if->unmap_skb = xgbe_unmap_skb;
48431- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48432- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48433-
48434- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48435-}
48436+const struct xgbe_desc_if default_xgbe_desc_if = {
48437+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48438+ .free_ring_resources = xgbe_free_ring_resources,
48439+ .map_tx_skb = xgbe_map_tx_skb,
48440+ .realloc_skb = xgbe_realloc_skb,
48441+ .unmap_skb = xgbe_unmap_skb,
48442+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48443+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48444+};
48445diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48446index ea27383..faa8936 100644
48447--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48448+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48449@@ -2463,7 +2463,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48450
48451 static int xgbe_init(struct xgbe_prv_data *pdata)
48452 {
48453- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48454+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48455 int ret;
48456
48457 DBGPR("-->xgbe_init\n");
48458@@ -2525,101 +2525,96 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48459 return 0;
48460 }
48461
48462-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48463-{
48464- DBGPR("-->xgbe_init_function_ptrs\n");
48465-
48466- hw_if->tx_complete = xgbe_tx_complete;
48467-
48468- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48469- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48470- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48471- hw_if->set_mac_address = xgbe_set_mac_address;
48472-
48473- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48474- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48475-
48476- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48477- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48478- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48479- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48480- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48481-
48482- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48483- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48484-
48485- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48486- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48487- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48488-
48489- hw_if->enable_tx = xgbe_enable_tx;
48490- hw_if->disable_tx = xgbe_disable_tx;
48491- hw_if->enable_rx = xgbe_enable_rx;
48492- hw_if->disable_rx = xgbe_disable_rx;
48493-
48494- hw_if->powerup_tx = xgbe_powerup_tx;
48495- hw_if->powerdown_tx = xgbe_powerdown_tx;
48496- hw_if->powerup_rx = xgbe_powerup_rx;
48497- hw_if->powerdown_rx = xgbe_powerdown_rx;
48498-
48499- hw_if->pre_xmit = xgbe_pre_xmit;
48500- hw_if->dev_read = xgbe_dev_read;
48501- hw_if->enable_int = xgbe_enable_int;
48502- hw_if->disable_int = xgbe_disable_int;
48503- hw_if->init = xgbe_init;
48504- hw_if->exit = xgbe_exit;
48505+const struct xgbe_hw_if default_xgbe_hw_if = {
48506+ .tx_complete = xgbe_tx_complete,
48507+
48508+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48509+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48510+ .add_mac_addresses = xgbe_add_mac_addresses,
48511+ .set_mac_address = xgbe_set_mac_address,
48512+
48513+ .enable_rx_csum = xgbe_enable_rx_csum,
48514+ .disable_rx_csum = xgbe_disable_rx_csum,
48515+
48516+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48517+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48518+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48519+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48520+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48521+
48522+ .read_mmd_regs = xgbe_read_mmd_regs,
48523+ .write_mmd_regs = xgbe_write_mmd_regs,
48524+
48525+ .set_gmii_speed = xgbe_set_gmii_speed,
48526+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48527+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48528+
48529+ .enable_tx = xgbe_enable_tx,
48530+ .disable_tx = xgbe_disable_tx,
48531+ .enable_rx = xgbe_enable_rx,
48532+ .disable_rx = xgbe_disable_rx,
48533+
48534+ .powerup_tx = xgbe_powerup_tx,
48535+ .powerdown_tx = xgbe_powerdown_tx,
48536+ .powerup_rx = xgbe_powerup_rx,
48537+ .powerdown_rx = xgbe_powerdown_rx,
48538+
48539+ .pre_xmit = xgbe_pre_xmit,
48540+ .dev_read = xgbe_dev_read,
48541+ .enable_int = xgbe_enable_int,
48542+ .disable_int = xgbe_disable_int,
48543+ .init = xgbe_init,
48544+ .exit = xgbe_exit,
48545
48546 /* Descriptor related Sequences have to be initialized here */
48547- hw_if->tx_desc_init = xgbe_tx_desc_init;
48548- hw_if->rx_desc_init = xgbe_rx_desc_init;
48549- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48550- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48551- hw_if->is_last_desc = xgbe_is_last_desc;
48552- hw_if->is_context_desc = xgbe_is_context_desc;
48553+ .tx_desc_init = xgbe_tx_desc_init,
48554+ .rx_desc_init = xgbe_rx_desc_init,
48555+ .tx_desc_reset = xgbe_tx_desc_reset,
48556+ .rx_desc_reset = xgbe_rx_desc_reset,
48557+ .is_last_desc = xgbe_is_last_desc,
48558+ .is_context_desc = xgbe_is_context_desc,
48559
48560 /* For FLOW ctrl */
48561- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48562- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48563+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48564+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48565
48566 /* For RX coalescing */
48567- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48568- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48569- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48570- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48571+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48572+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48573+ .usec_to_riwt = xgbe_usec_to_riwt,
48574+ .riwt_to_usec = xgbe_riwt_to_usec,
48575
48576 /* For RX and TX threshold config */
48577- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48578- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48579+ .config_rx_threshold = xgbe_config_rx_threshold,
48580+ .config_tx_threshold = xgbe_config_tx_threshold,
48581
48582 /* For RX and TX Store and Forward Mode config */
48583- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48584- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48585+ .config_rsf_mode = xgbe_config_rsf_mode,
48586+ .config_tsf_mode = xgbe_config_tsf_mode,
48587
48588 /* For TX DMA Operating on Second Frame config */
48589- hw_if->config_osp_mode = xgbe_config_osp_mode;
48590+ .config_osp_mode = xgbe_config_osp_mode,
48591
48592 /* For RX and TX PBL config */
48593- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48594- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48595- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48596- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48597- hw_if->config_pblx8 = xgbe_config_pblx8;
48598+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48599+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48600+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48601+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48602+ .config_pblx8 = xgbe_config_pblx8,
48603
48604 /* For MMC statistics support */
48605- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48606- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48607- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48608+ .tx_mmc_int = xgbe_tx_mmc_int,
48609+ .rx_mmc_int = xgbe_rx_mmc_int,
48610+ .read_mmc_stats = xgbe_read_mmc_stats,
48611
48612 /* For PTP config */
48613- hw_if->config_tstamp = xgbe_config_tstamp;
48614- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48615- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48616- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48617- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48618+ .config_tstamp = xgbe_config_tstamp,
48619+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48620+ .set_tstamp_time = xgbe_set_tstamp_time,
48621+ .get_tstamp_time = xgbe_get_tstamp_time,
48622+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48623
48624 /* For Data Center Bridging config */
48625- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48626- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48627-
48628- DBGPR("<--xgbe_init_function_ptrs\n");
48629-}
48630+ .config_dcb_tc = xgbe_config_dcb_tc,
48631+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48632+};
48633diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48634index b26d758..b0d1c3b 100644
48635--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48636+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48637@@ -155,7 +155,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48638
48639 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48640 {
48641- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48642+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48643 struct xgbe_channel *channel;
48644 enum xgbe_int int_id;
48645 unsigned int i;
48646@@ -177,7 +177,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48647
48648 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48649 {
48650- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48651+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48652 struct xgbe_channel *channel;
48653 enum xgbe_int int_id;
48654 unsigned int i;
48655@@ -200,7 +200,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48656 static irqreturn_t xgbe_isr(int irq, void *data)
48657 {
48658 struct xgbe_prv_data *pdata = data;
48659- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48660+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48661 struct xgbe_channel *channel;
48662 unsigned int dma_isr, dma_ch_isr;
48663 unsigned int mac_isr, mac_tssr;
48664@@ -447,7 +447,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
48665
48666 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48667 {
48668- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48669+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48670
48671 DBGPR("-->xgbe_init_tx_coalesce\n");
48672
48673@@ -461,7 +461,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48674
48675 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48676 {
48677- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48678+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48679
48680 DBGPR("-->xgbe_init_rx_coalesce\n");
48681
48682@@ -475,7 +475,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48683
48684 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48685 {
48686- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48687+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48688 struct xgbe_channel *channel;
48689 struct xgbe_ring *ring;
48690 struct xgbe_ring_data *rdata;
48691@@ -500,7 +500,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48692
48693 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48694 {
48695- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48696+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48697 struct xgbe_channel *channel;
48698 struct xgbe_ring *ring;
48699 struct xgbe_ring_data *rdata;
48700@@ -526,7 +526,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48701 static void xgbe_adjust_link(struct net_device *netdev)
48702 {
48703 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48704- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48705+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48706 struct phy_device *phydev = pdata->phydev;
48707 int new_state = 0;
48708
48709@@ -634,7 +634,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48710 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48711 {
48712 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48713- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48714+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48715 unsigned long flags;
48716
48717 DBGPR("-->xgbe_powerdown\n");
48718@@ -672,7 +672,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48719 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48720 {
48721 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48722- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48723+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48724 unsigned long flags;
48725
48726 DBGPR("-->xgbe_powerup\n");
48727@@ -709,7 +709,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48728
48729 static int xgbe_start(struct xgbe_prv_data *pdata)
48730 {
48731- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48732+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48733 struct net_device *netdev = pdata->netdev;
48734
48735 DBGPR("-->xgbe_start\n");
48736@@ -735,7 +735,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
48737
48738 static void xgbe_stop(struct xgbe_prv_data *pdata)
48739 {
48740- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48741+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48742 struct net_device *netdev = pdata->netdev;
48743
48744 DBGPR("-->xgbe_stop\n");
48745@@ -755,7 +755,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
48746
48747 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
48748 {
48749- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48750+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48751
48752 DBGPR("-->xgbe_restart_dev\n");
48753
48754@@ -952,7 +952,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48755 return -ERANGE;
48756 }
48757
48758- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48759+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48760
48761 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48762
48763@@ -1090,8 +1090,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48764 static int xgbe_open(struct net_device *netdev)
48765 {
48766 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48767- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48768- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48769+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48770+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48771 int ret;
48772
48773 DBGPR("-->xgbe_open\n");
48774@@ -1171,8 +1171,8 @@ err_phy_init:
48775 static int xgbe_close(struct net_device *netdev)
48776 {
48777 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48778- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48779- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48780+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48781+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48782
48783 DBGPR("-->xgbe_close\n");
48784
48785@@ -1206,8 +1206,8 @@ static int xgbe_close(struct net_device *netdev)
48786 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48787 {
48788 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48789- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48790- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48791+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48792+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48793 struct xgbe_channel *channel;
48794 struct xgbe_ring *ring;
48795 struct xgbe_packet_data *packet;
48796@@ -1276,7 +1276,7 @@ tx_netdev_return:
48797 static void xgbe_set_rx_mode(struct net_device *netdev)
48798 {
48799 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48800- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48801+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48802 unsigned int pr_mode, am_mode;
48803
48804 DBGPR("-->xgbe_set_rx_mode\n");
48805@@ -1295,7 +1295,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48806 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48807 {
48808 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48809- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48810+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48811 struct sockaddr *saddr = addr;
48812
48813 DBGPR("-->xgbe_set_mac_address\n");
48814@@ -1362,7 +1362,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48815
48816 DBGPR("-->%s\n", __func__);
48817
48818- pdata->hw_if.read_mmc_stats(pdata);
48819+ pdata->hw_if->read_mmc_stats(pdata);
48820
48821 s->rx_packets = pstats->rxframecount_gb;
48822 s->rx_bytes = pstats->rxoctetcount_gb;
48823@@ -1389,7 +1389,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48824 u16 vid)
48825 {
48826 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48827- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48828+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48829
48830 DBGPR("-->%s\n", __func__);
48831
48832@@ -1405,7 +1405,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48833 u16 vid)
48834 {
48835 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48836- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48837+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48838
48839 DBGPR("-->%s\n", __func__);
48840
48841@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
48842 netdev_features_t features)
48843 {
48844 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48845- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48846+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48847 unsigned int rxcsum, rxvlan, rxvlan_filter;
48848
48849 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
48850@@ -1521,7 +1521,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48851 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48852 {
48853 struct xgbe_prv_data *pdata = channel->pdata;
48854- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48855+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48856 struct xgbe_ring *ring = channel->rx_ring;
48857 struct xgbe_ring_data *rdata;
48858
48859@@ -1537,8 +1537,8 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
48860 static int xgbe_tx_poll(struct xgbe_channel *channel)
48861 {
48862 struct xgbe_prv_data *pdata = channel->pdata;
48863- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48864- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48865+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48866+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48867 struct xgbe_ring *ring = channel->tx_ring;
48868 struct xgbe_ring_data *rdata;
48869 struct xgbe_ring_desc *rdesc;
48870@@ -1590,7 +1590,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48871 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48872 {
48873 struct xgbe_prv_data *pdata = channel->pdata;
48874- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48875+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48876 struct xgbe_ring *ring = channel->rx_ring;
48877 struct xgbe_ring_data *rdata;
48878 struct xgbe_packet_data *packet;
48879diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48880index 46f6130..f37dde3 100644
48881--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48882+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48883@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48884
48885 DBGPR("-->%s\n", __func__);
48886
48887- pdata->hw_if.read_mmc_stats(pdata);
48888+ pdata->hw_if->read_mmc_stats(pdata);
48889 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48890 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48891 *data++ = *(u64 *)stat;
48892@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48893 struct ethtool_coalesce *ec)
48894 {
48895 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48896- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48897+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48898 unsigned int riwt;
48899
48900 DBGPR("-->xgbe_get_coalesce\n");
48901@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48902 struct ethtool_coalesce *ec)
48903 {
48904 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48905- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48906+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48907 unsigned int rx_frames, rx_riwt, rx_usecs;
48908 unsigned int tx_frames, tx_usecs;
48909
48910diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48911index bdf9cfa..340aea1 100644
48912--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48913+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48914@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48915 DBGPR("<--xgbe_default_config\n");
48916 }
48917
48918-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48919-{
48920- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48921- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48922-}
48923-
48924 static int xgbe_probe(struct platform_device *pdev)
48925 {
48926 struct xgbe_prv_data *pdata;
48927@@ -328,9 +322,8 @@ static int xgbe_probe(struct platform_device *pdev)
48928 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
48929
48930 /* Set all the function pointers */
48931- xgbe_init_all_fptrs(pdata);
48932- hw_if = &pdata->hw_if;
48933- desc_if = &pdata->desc_if;
48934+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48935+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48936
48937 /* Issue software reset to device */
48938 hw_if->exit(pdata);
48939diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48940index 6d2221e..47d1325 100644
48941--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48942+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48943@@ -127,7 +127,7 @@
48944 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48945 {
48946 struct xgbe_prv_data *pdata = mii->priv;
48947- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48948+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48949 int mmd_data;
48950
48951 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48952@@ -144,7 +144,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48953 u16 mmd_val)
48954 {
48955 struct xgbe_prv_data *pdata = mii->priv;
48956- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48957+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48958 int mmd_data = mmd_val;
48959
48960 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48961diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48962index 37e64cf..c3b61cf 100644
48963--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48964+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48965@@ -130,7 +130,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48966 tstamp_cc);
48967 u64 nsec;
48968
48969- nsec = pdata->hw_if.get_tstamp_time(pdata);
48970+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48971
48972 return nsec;
48973 }
48974@@ -159,7 +159,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48975
48976 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48977
48978- pdata->hw_if.update_tstamp_addend(pdata, addend);
48979+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48980
48981 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48982
48983diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48984index e9fe6e6..875fbaf 100644
48985--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48986+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48987@@ -585,8 +585,8 @@ struct xgbe_prv_data {
48988
48989 int irq_number;
48990
48991- struct xgbe_hw_if hw_if;
48992- struct xgbe_desc_if desc_if;
48993+ const struct xgbe_hw_if *hw_if;
48994+ const struct xgbe_desc_if *desc_if;
48995
48996 /* AXI DMA settings */
48997 unsigned int axdomain;
48998@@ -699,6 +699,9 @@ struct xgbe_prv_data {
48999 #endif
49000 };
49001
49002+extern const struct xgbe_hw_if default_xgbe_hw_if;
49003+extern const struct xgbe_desc_if default_xgbe_desc_if;
49004+
49005 /* Function prototypes*/
49006
49007 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
49008diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49009index 571427c..e9fe9e7 100644
49010--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49011+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49012@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
49013 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
49014 {
49015 /* RX_MODE controlling object */
49016- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
49017+ bnx2x_init_rx_mode_obj(bp);
49018
49019 /* multicast configuration controlling object */
49020 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
49021diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49022index b193604..8873bfd 100644
49023--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49024+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49025@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
49026 return rc;
49027 }
49028
49029-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49030- struct bnx2x_rx_mode_obj *o)
49031+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
49032 {
49033 if (CHIP_IS_E1x(bp)) {
49034- o->wait_comp = bnx2x_empty_rx_mode_wait;
49035- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
49036+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
49037+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
49038 } else {
49039- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
49040- o->config_rx_mode = bnx2x_set_rx_mode_e2;
49041+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
49042+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
49043 }
49044 }
49045
49046diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49047index 718ecd2..2183b2f 100644
49048--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49049+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49050@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
49051
49052 /********************* RX MODE ****************/
49053
49054-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49055- struct bnx2x_rx_mode_obj *o);
49056+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
49057
49058 /**
49059 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
49060diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49061index 31c9f82..e65e986 100644
49062--- a/drivers/net/ethernet/broadcom/tg3.h
49063+++ b/drivers/net/ethernet/broadcom/tg3.h
49064@@ -150,6 +150,7 @@
49065 #define CHIPREV_ID_5750_A0 0x4000
49066 #define CHIPREV_ID_5750_A1 0x4001
49067 #define CHIPREV_ID_5750_A3 0x4003
49068+#define CHIPREV_ID_5750_C1 0x4201
49069 #define CHIPREV_ID_5750_C2 0x4202
49070 #define CHIPREV_ID_5752_A0_HW 0x5000
49071 #define CHIPREV_ID_5752_A0 0x6000
49072diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49073index 13f9636..228040f 100644
49074--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49075+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49076@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
49077 }
49078
49079 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49080- bna_cb_ioceth_enable,
49081- bna_cb_ioceth_disable,
49082- bna_cb_ioceth_hbfail,
49083- bna_cb_ioceth_reset
49084+ .enable_cbfn = bna_cb_ioceth_enable,
49085+ .disable_cbfn = bna_cb_ioceth_disable,
49086+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49087+ .reset_cbfn = bna_cb_ioceth_reset
49088 };
49089
49090 static void bna_attr_init(struct bna_ioceth *ioceth)
49091diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
49092index ffc92a4..40edc77 100644
49093--- a/drivers/net/ethernet/brocade/bna/bnad.c
49094+++ b/drivers/net/ethernet/brocade/bna/bnad.c
49095@@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
49096
49097 len = (vec == nvecs) ?
49098 last_fraglen : unmap->vector.len;
49099+ skb->truesize += unmap->vector.len;
49100 totlen += len;
49101
49102 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
49103@@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
49104
49105 skb->len += totlen;
49106 skb->data_len += totlen;
49107- skb->truesize += totlen;
49108 }
49109
49110 static inline void
49111diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49112index 8cffcdf..aadf043 100644
49113--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49114+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49115@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49116 */
49117 struct l2t_skb_cb {
49118 arp_failure_handler_func arp_failure_handler;
49119-};
49120+} __no_const;
49121
49122 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49123
49124diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49125index e5be511..16cb55c 100644
49126--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49127+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49128@@ -2355,7 +2355,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49129
49130 int i;
49131 struct adapter *ap = netdev2adap(dev);
49132- static const unsigned int *reg_ranges;
49133+ const unsigned int *reg_ranges;
49134 int arr_size = 0, buf_size = 0;
49135
49136 if (is_t4(ap->params.chip)) {
49137diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49138index cf8b6ff..274271e 100644
49139--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49140+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49141@@ -5387,7 +5387,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49142 for (i=0; i<ETH_ALEN; i++) {
49143 tmp.addr[i] = dev->dev_addr[i];
49144 }
49145- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49146+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49147 break;
49148
49149 case DE4X5_SET_HWADDR: /* Set the hardware address */
49150@@ -5427,7 +5427,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49151 spin_lock_irqsave(&lp->lock, flags);
49152 memcpy(&statbuf, &lp->pktStats, ioc->len);
49153 spin_unlock_irqrestore(&lp->lock, flags);
49154- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49155+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49156 return -EFAULT;
49157 break;
49158 }
49159diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49160index 93ff8ef..01e0537 100644
49161--- a/drivers/net/ethernet/emulex/benet/be_main.c
49162+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49163@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49164
49165 if (wrapped)
49166 newacc += 65536;
49167- ACCESS_ONCE(*acc) = newacc;
49168+ ACCESS_ONCE_RW(*acc) = newacc;
49169 }
49170
49171 static void populate_erx_stats(struct be_adapter *adapter,
49172diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49173index c77fa4a..7fd42fc 100644
49174--- a/drivers/net/ethernet/faraday/ftgmac100.c
49175+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49176@@ -30,6 +30,8 @@
49177 #include <linux/netdevice.h>
49178 #include <linux/phy.h>
49179 #include <linux/platform_device.h>
49180+#include <linux/interrupt.h>
49181+#include <linux/irqreturn.h>
49182 #include <net/ip.h>
49183
49184 #include "ftgmac100.h"
49185diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49186index 4ff1adc..0ea6bf4 100644
49187--- a/drivers/net/ethernet/faraday/ftmac100.c
49188+++ b/drivers/net/ethernet/faraday/ftmac100.c
49189@@ -31,6 +31,8 @@
49190 #include <linux/module.h>
49191 #include <linux/netdevice.h>
49192 #include <linux/platform_device.h>
49193+#include <linux/interrupt.h>
49194+#include <linux/irqreturn.h>
49195
49196 #include "ftmac100.h"
49197
49198diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49199index 537b621..07f87ce 100644
49200--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49201+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49202@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49203 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49204
49205 /* Update the base adjustement value. */
49206- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49207+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49208 smp_mb(); /* Force the above update. */
49209 }
49210
49211diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49212index 5fd4b52..87aa34b 100644
49213--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49214+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49215@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49216 }
49217
49218 /* update the base incval used to calculate frequency adjustment */
49219- ACCESS_ONCE(adapter->base_incval) = incval;
49220+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49221 smp_mb();
49222
49223 /* need lock to prevent incorrect read while modifying cyclecounter */
49224diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49225index c14d4d8..66da603 100644
49226--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49227+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49228@@ -1259,6 +1259,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
49229 struct ixgbe_hw *hw = &adapter->hw;
49230 u32 regval;
49231
49232+ if (vf >= adapter->num_vfs)
49233+ return -EINVAL;
49234+
49235 adapter->vfinfo[vf].spoofchk_enabled = setting;
49236
49237 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
49238diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49239index 2bbd01f..e8baa64 100644
49240--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49241+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49242@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49243 struct __vxge_hw_fifo *fifo;
49244 struct vxge_hw_fifo_config *config;
49245 u32 txdl_size, txdl_per_memblock;
49246- struct vxge_hw_mempool_cbs fifo_mp_callback;
49247+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49248+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49249+ };
49250+
49251 struct __vxge_hw_virtualpath *vpath;
49252
49253 if ((vp == NULL) || (attr == NULL)) {
49254@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49255 goto exit;
49256 }
49257
49258- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49259-
49260 fifo->mempool =
49261 __vxge_hw_mempool_create(vpath->hldev,
49262 fifo->config->memblock_size,
49263diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49264index 3172cdf..d01ab34 100644
49265--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49266+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49267@@ -2190,7 +2190,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49268 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49269 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49270 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49271- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49272+ pax_open_kernel();
49273+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49274+ pax_close_kernel();
49275 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49276 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49277 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49278diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49279index be7d7a6..a8983f8 100644
49280--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49281+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49282@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49283 case QLCNIC_NON_PRIV_FUNC:
49284 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49285 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49286- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49287+ pax_open_kernel();
49288+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49289+ pax_close_kernel();
49290 break;
49291 case QLCNIC_PRIV_FUNC:
49292 ahw->op_mode = QLCNIC_PRIV_FUNC;
49293 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49294- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49295+ pax_open_kernel();
49296+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49297+ pax_close_kernel();
49298 break;
49299 case QLCNIC_MGMT_FUNC:
49300 ahw->op_mode = QLCNIC_MGMT_FUNC;
49301 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49302- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49303+ pax_open_kernel();
49304+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49305+ pax_close_kernel();
49306 break;
49307 default:
49308 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
49309diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49310index c9f57fb..208bdc1 100644
49311--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49312+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49313@@ -1285,7 +1285,7 @@ flash_temp:
49314 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49315 {
49316 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49317- static const struct qlcnic_dump_operations *fw_dump_ops;
49318+ const struct qlcnic_dump_operations *fw_dump_ops;
49319 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49320 u32 entry_offset, dump, no_entries, buf_offset = 0;
49321 int i, k, ops_cnt, ops_index, dump_size = 0;
49322diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49323index 0921302..927f761 100644
49324--- a/drivers/net/ethernet/realtek/r8169.c
49325+++ b/drivers/net/ethernet/realtek/r8169.c
49326@@ -744,22 +744,22 @@ struct rtl8169_private {
49327 struct mdio_ops {
49328 void (*write)(struct rtl8169_private *, int, int);
49329 int (*read)(struct rtl8169_private *, int);
49330- } mdio_ops;
49331+ } __no_const mdio_ops;
49332
49333 struct pll_power_ops {
49334 void (*down)(struct rtl8169_private *);
49335 void (*up)(struct rtl8169_private *);
49336- } pll_power_ops;
49337+ } __no_const pll_power_ops;
49338
49339 struct jumbo_ops {
49340 void (*enable)(struct rtl8169_private *);
49341 void (*disable)(struct rtl8169_private *);
49342- } jumbo_ops;
49343+ } __no_const jumbo_ops;
49344
49345 struct csi_ops {
49346 void (*write)(struct rtl8169_private *, int, int);
49347 u32 (*read)(struct rtl8169_private *, int);
49348- } csi_ops;
49349+ } __no_const csi_ops;
49350
49351 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49352 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49353diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49354index 6b861e3..204ac86 100644
49355--- a/drivers/net/ethernet/sfc/ptp.c
49356+++ b/drivers/net/ethernet/sfc/ptp.c
49357@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49358 ptp->start.dma_addr);
49359
49360 /* Clear flag that signals MC ready */
49361- ACCESS_ONCE(*start) = 0;
49362+ ACCESS_ONCE_RW(*start) = 0;
49363 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49364 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49365 EFX_BUG_ON_PARANOID(rc);
49366diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49367index 08c483b..2c4a553 100644
49368--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49369+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49370@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49371
49372 writel(value, ioaddr + MMC_CNTRL);
49373
49374- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49375- MMC_CNTRL, value);
49376+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49377+// MMC_CNTRL, value);
49378 }
49379
49380 /* To mask all all interrupts.*/
49381diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49382index d5e07de..e3bf20a 100644
49383--- a/drivers/net/hyperv/hyperv_net.h
49384+++ b/drivers/net/hyperv/hyperv_net.h
49385@@ -171,7 +171,7 @@ struct rndis_device {
49386 enum rndis_device_state state;
49387 bool link_state;
49388 bool link_change;
49389- atomic_t new_req_id;
49390+ atomic_unchecked_t new_req_id;
49391
49392 spinlock_t request_lock;
49393 struct list_head req_list;
49394diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
49395index 0fcb5e7..148fda3 100644
49396--- a/drivers/net/hyperv/netvsc_drv.c
49397+++ b/drivers/net/hyperv/netvsc_drv.c
49398@@ -556,6 +556,7 @@ do_lso:
49399 do_send:
49400 /* Start filling in the page buffers with the rndis hdr */
49401 rndis_msg->msg_len += rndis_msg_size;
49402+ packet->total_data_buflen = rndis_msg->msg_len;
49403 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
49404 skb, &packet->page_buf[0]);
49405
49406diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49407index 2b86f0b..ecc996f 100644
49408--- a/drivers/net/hyperv/rndis_filter.c
49409+++ b/drivers/net/hyperv/rndis_filter.c
49410@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49411 * template
49412 */
49413 set = &rndis_msg->msg.set_req;
49414- set->req_id = atomic_inc_return(&dev->new_req_id);
49415+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49416
49417 /* Add to the request list */
49418 spin_lock_irqsave(&dev->request_lock, flags);
49419@@ -911,7 +911,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49420
49421 /* Setup the rndis set */
49422 halt = &request->request_msg.msg.halt_req;
49423- halt->req_id = atomic_inc_return(&dev->new_req_id);
49424+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49425
49426 /* Ignore return since this msg is optional. */
49427 rndis_filter_send_request(dev, request);
49428diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49429index 9ce854f..e43fa17 100644
49430--- a/drivers/net/ieee802154/fakehard.c
49431+++ b/drivers/net/ieee802154/fakehard.c
49432@@ -365,7 +365,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49433 phy->transmit_power = 0xbf;
49434
49435 dev->netdev_ops = &fake_ops;
49436- dev->ml_priv = &fake_mlme;
49437+ dev->ml_priv = (void *)&fake_mlme;
49438
49439 priv = netdev_priv(dev);
49440 priv->phy = phy;
49441diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49442index 726edab..8939092 100644
49443--- a/drivers/net/macvlan.c
49444+++ b/drivers/net/macvlan.c
49445@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49446 free_nskb:
49447 kfree_skb(nskb);
49448 err:
49449- atomic_long_inc(&skb->dev->rx_dropped);
49450+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49451 }
49452
49453 /* called under rcu_read_lock() from netif_receive_skb */
49454@@ -1144,13 +1144,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49455 int macvlan_link_register(struct rtnl_link_ops *ops)
49456 {
49457 /* common fields */
49458- ops->priv_size = sizeof(struct macvlan_dev);
49459- ops->validate = macvlan_validate;
49460- ops->maxtype = IFLA_MACVLAN_MAX;
49461- ops->policy = macvlan_policy;
49462- ops->changelink = macvlan_changelink;
49463- ops->get_size = macvlan_get_size;
49464- ops->fill_info = macvlan_fill_info;
49465+ pax_open_kernel();
49466+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49467+ *(void **)&ops->validate = macvlan_validate;
49468+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49469+ *(const void **)&ops->policy = macvlan_policy;
49470+ *(void **)&ops->changelink = macvlan_changelink;
49471+ *(void **)&ops->get_size = macvlan_get_size;
49472+ *(void **)&ops->fill_info = macvlan_fill_info;
49473+ pax_close_kernel();
49474
49475 return rtnl_link_register(ops);
49476 };
49477@@ -1230,7 +1232,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49478 return NOTIFY_DONE;
49479 }
49480
49481-static struct notifier_block macvlan_notifier_block __read_mostly = {
49482+static struct notifier_block macvlan_notifier_block = {
49483 .notifier_call = macvlan_device_event,
49484 };
49485
49486diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49487index 0c6adaa..0784e3f 100644
49488--- a/drivers/net/macvtap.c
49489+++ b/drivers/net/macvtap.c
49490@@ -1018,7 +1018,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49491 }
49492
49493 ret = 0;
49494- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49495+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49496 put_user(q->flags, &ifr->ifr_flags))
49497 ret = -EFAULT;
49498 macvtap_put_vlan(vlan);
49499@@ -1188,7 +1188,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49500 return NOTIFY_DONE;
49501 }
49502
49503-static struct notifier_block macvtap_notifier_block __read_mostly = {
49504+static struct notifier_block macvtap_notifier_block = {
49505 .notifier_call = macvtap_device_event,
49506 };
49507
49508diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49509index fa0d717..bab8c01 100644
49510--- a/drivers/net/ppp/ppp_generic.c
49511+++ b/drivers/net/ppp/ppp_generic.c
49512@@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49513 if (file == ppp->owner)
49514 ppp_shutdown_interface(ppp);
49515 }
49516- if (atomic_long_read(&file->f_count) <= 2) {
49517+ if (atomic_long_read(&file->f_count) < 2) {
49518 ppp_release(NULL, file);
49519 err = 0;
49520 } else
49521@@ -1020,7 +1020,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49522 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49523 struct ppp_stats stats;
49524 struct ppp_comp_stats cstats;
49525- char *vers;
49526
49527 switch (cmd) {
49528 case SIOCGPPPSTATS:
49529@@ -1042,8 +1041,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49530 break;
49531
49532 case SIOCGPPPVER:
49533- vers = PPP_VERSION;
49534- if (copy_to_user(addr, vers, strlen(vers) + 1))
49535+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49536 break;
49537 err = 0;
49538 break;
49539diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49540index 079f7ad..b2a2bfa7 100644
49541--- a/drivers/net/slip/slhc.c
49542+++ b/drivers/net/slip/slhc.c
49543@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49544 register struct tcphdr *thp;
49545 register struct iphdr *ip;
49546 register struct cstate *cs;
49547- int len, hdrlen;
49548+ long len, hdrlen;
49549 unsigned char *cp = icp;
49550
49551 /* We've got a compressed packet; read the change byte */
49552diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49553index 1f76c2ea..9681171 100644
49554--- a/drivers/net/team/team.c
49555+++ b/drivers/net/team/team.c
49556@@ -2862,7 +2862,7 @@ static int team_device_event(struct notifier_block *unused,
49557 return NOTIFY_DONE;
49558 }
49559
49560-static struct notifier_block team_notifier_block __read_mostly = {
49561+static struct notifier_block team_notifier_block = {
49562 .notifier_call = team_device_event,
49563 };
49564
49565diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49566index acaaf67..a33483d 100644
49567--- a/drivers/net/tun.c
49568+++ b/drivers/net/tun.c
49569@@ -1855,7 +1855,7 @@ unlock:
49570 }
49571
49572 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49573- unsigned long arg, int ifreq_len)
49574+ unsigned long arg, size_t ifreq_len)
49575 {
49576 struct tun_file *tfile = file->private_data;
49577 struct tun_struct *tun;
49578@@ -1868,6 +1868,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49579 unsigned int ifindex;
49580 int ret;
49581
49582+ if (ifreq_len > sizeof ifr)
49583+ return -EFAULT;
49584+
49585 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49586 if (copy_from_user(&ifr, argp, ifreq_len))
49587 return -EFAULT;
49588diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49589index babda7d..e40c90a 100644
49590--- a/drivers/net/usb/hso.c
49591+++ b/drivers/net/usb/hso.c
49592@@ -71,7 +71,7 @@
49593 #include <asm/byteorder.h>
49594 #include <linux/serial_core.h>
49595 #include <linux/serial.h>
49596-
49597+#include <asm/local.h>
49598
49599 #define MOD_AUTHOR "Option Wireless"
49600 #define MOD_DESCRIPTION "USB High Speed Option driver"
49601@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49602 struct urb *urb;
49603
49604 urb = serial->rx_urb[0];
49605- if (serial->port.count > 0) {
49606+ if (atomic_read(&serial->port.count) > 0) {
49607 count = put_rxbuf_data(urb, serial);
49608 if (count == -1)
49609 return;
49610@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49611 DUMP1(urb->transfer_buffer, urb->actual_length);
49612
49613 /* Anyone listening? */
49614- if (serial->port.count == 0)
49615+ if (atomic_read(&serial->port.count) == 0)
49616 return;
49617
49618 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49619@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49620 tty_port_tty_set(&serial->port, tty);
49621
49622 /* check for port already opened, if not set the termios */
49623- serial->port.count++;
49624- if (serial->port.count == 1) {
49625+ if (atomic_inc_return(&serial->port.count) == 1) {
49626 serial->rx_state = RX_IDLE;
49627 /* Force default termio settings */
49628 _hso_serial_set_termios(tty, NULL);
49629@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49630 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49631 if (result) {
49632 hso_stop_serial_device(serial->parent);
49633- serial->port.count--;
49634+ atomic_dec(&serial->port.count);
49635 kref_put(&serial->parent->ref, hso_serial_ref_free);
49636 }
49637 } else {
49638@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49639
49640 /* reset the rts and dtr */
49641 /* do the actual close */
49642- serial->port.count--;
49643+ atomic_dec(&serial->port.count);
49644
49645- if (serial->port.count <= 0) {
49646- serial->port.count = 0;
49647+ if (atomic_read(&serial->port.count) <= 0) {
49648+ atomic_set(&serial->port.count, 0);
49649 tty_port_tty_set(&serial->port, NULL);
49650 if (!usb_gone)
49651 hso_stop_serial_device(serial->parent);
49652@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49653
49654 /* the actual setup */
49655 spin_lock_irqsave(&serial->serial_lock, flags);
49656- if (serial->port.count)
49657+ if (atomic_read(&serial->port.count))
49658 _hso_serial_set_termios(tty, old);
49659 else
49660 tty->termios = *old;
49661@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
49662 D1("Pending read interrupt on port %d\n", i);
49663 spin_lock(&serial->serial_lock);
49664 if (serial->rx_state == RX_IDLE &&
49665- serial->port.count > 0) {
49666+ atomic_read(&serial->port.count) > 0) {
49667 /* Setup and send a ctrl req read on
49668 * port i */
49669 if (!serial->rx_urb_filled[0]) {
49670@@ -3047,7 +3046,7 @@ static int hso_resume(struct usb_interface *iface)
49671 /* Start all serial ports */
49672 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49673 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49674- if (dev2ser(serial_table[i])->port.count) {
49675+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49676 result =
49677 hso_start_serial_device(serial_table[i], GFP_NOIO);
49678 hso_kick_transmit(dev2ser(serial_table[i]));
49679diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49680index 604ef21..d1f49a1 100644
49681--- a/drivers/net/usb/r8152.c
49682+++ b/drivers/net/usb/r8152.c
49683@@ -575,7 +575,7 @@ struct r8152 {
49684 void (*up)(struct r8152 *);
49685 void (*down)(struct r8152 *);
49686 void (*unload)(struct r8152 *);
49687- } rtl_ops;
49688+ } __no_const rtl_ops;
49689
49690 int intr_interval;
49691 u32 saved_wolopts;
49692diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49693index a2515887..6d13233 100644
49694--- a/drivers/net/usb/sierra_net.c
49695+++ b/drivers/net/usb/sierra_net.c
49696@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49697 /* atomic counter partially included in MAC address to make sure 2 devices
49698 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49699 */
49700-static atomic_t iface_counter = ATOMIC_INIT(0);
49701+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49702
49703 /*
49704 * SYNC Timer Delay definition used to set the expiry time
49705@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49706 dev->net->netdev_ops = &sierra_net_device_ops;
49707
49708 /* change MAC addr to include, ifacenum, and to be unique */
49709- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49710+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49711 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49712
49713 /* we will have to manufacture ethernet headers, prepare template */
49714diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49715index 59caa06..de191b3 100644
49716--- a/drivers/net/virtio_net.c
49717+++ b/drivers/net/virtio_net.c
49718@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49719 #define RECEIVE_AVG_WEIGHT 64
49720
49721 /* Minimum alignment for mergeable packet buffers. */
49722-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49723+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49724
49725 #define VIRTNET_DRIVER_VERSION "1.0.0"
49726
49727diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49728index beb377b..b5bbf08 100644
49729--- a/drivers/net/vxlan.c
49730+++ b/drivers/net/vxlan.c
49731@@ -1440,9 +1440,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
49732 if (!in6_dev)
49733 goto out;
49734
49735- if (!pskb_may_pull(skb, skb->len))
49736- goto out;
49737-
49738 iphdr = ipv6_hdr(skb);
49739 saddr = &iphdr->saddr;
49740 daddr = &iphdr->daddr;
49741@@ -1717,6 +1714,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
49742 struct pcpu_sw_netstats *tx_stats, *rx_stats;
49743 union vxlan_addr loopback;
49744 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
49745+ struct net_device *dev = skb->dev;
49746+ int len = skb->len;
49747
49748 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
49749 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
49750@@ -1740,16 +1739,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
49751
49752 u64_stats_update_begin(&tx_stats->syncp);
49753 tx_stats->tx_packets++;
49754- tx_stats->tx_bytes += skb->len;
49755+ tx_stats->tx_bytes += len;
49756 u64_stats_update_end(&tx_stats->syncp);
49757
49758 if (netif_rx(skb) == NET_RX_SUCCESS) {
49759 u64_stats_update_begin(&rx_stats->syncp);
49760 rx_stats->rx_packets++;
49761- rx_stats->rx_bytes += skb->len;
49762+ rx_stats->rx_bytes += len;
49763 u64_stats_update_end(&rx_stats->syncp);
49764 } else {
49765- skb->dev->stats.rx_dropped++;
49766+ dev->stats.rx_dropped++;
49767 }
49768 }
49769
49770@@ -1927,7 +1926,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
49771 return arp_reduce(dev, skb);
49772 #if IS_ENABLED(CONFIG_IPV6)
49773 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
49774- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
49775+ pskb_may_pull(skb, sizeof(struct ipv6hdr)
49776+ + sizeof(struct nd_msg)) &&
49777 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
49778 struct nd_msg *msg;
49779
49780@@ -2750,7 +2750,7 @@ nla_put_failure:
49781 return -EMSGSIZE;
49782 }
49783
49784-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49785+static struct rtnl_link_ops vxlan_link_ops = {
49786 .kind = "vxlan",
49787 .maxtype = IFLA_VXLAN_MAX,
49788 .policy = vxlan_policy,
49789@@ -2797,7 +2797,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49790 return NOTIFY_DONE;
49791 }
49792
49793-static struct notifier_block vxlan_notifier_block __read_mostly = {
49794+static struct notifier_block vxlan_notifier_block = {
49795 .notifier_call = vxlan_lowerdev_event,
49796 };
49797
49798diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49799index 5920c99..ff2e4a5 100644
49800--- a/drivers/net/wan/lmc/lmc_media.c
49801+++ b/drivers/net/wan/lmc/lmc_media.c
49802@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49803 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49804
49805 lmc_media_t lmc_ds3_media = {
49806- lmc_ds3_init, /* special media init stuff */
49807- lmc_ds3_default, /* reset to default state */
49808- lmc_ds3_set_status, /* reset status to state provided */
49809- lmc_dummy_set_1, /* set clock source */
49810- lmc_dummy_set2_1, /* set line speed */
49811- lmc_ds3_set_100ft, /* set cable length */
49812- lmc_ds3_set_scram, /* set scrambler */
49813- lmc_ds3_get_link_status, /* get link status */
49814- lmc_dummy_set_1, /* set link status */
49815- lmc_ds3_set_crc_length, /* set CRC length */
49816- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49817- lmc_ds3_watchdog
49818+ .init = lmc_ds3_init, /* special media init stuff */
49819+ .defaults = lmc_ds3_default, /* reset to default state */
49820+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49821+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49822+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49823+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49824+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49825+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49826+ .set_link_status = lmc_dummy_set_1, /* set link status */
49827+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49828+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49829+ .watchdog = lmc_ds3_watchdog
49830 };
49831
49832 lmc_media_t lmc_hssi_media = {
49833- lmc_hssi_init, /* special media init stuff */
49834- lmc_hssi_default, /* reset to default state */
49835- lmc_hssi_set_status, /* reset status to state provided */
49836- lmc_hssi_set_clock, /* set clock source */
49837- lmc_dummy_set2_1, /* set line speed */
49838- lmc_dummy_set_1, /* set cable length */
49839- lmc_dummy_set_1, /* set scrambler */
49840- lmc_hssi_get_link_status, /* get link status */
49841- lmc_hssi_set_link_status, /* set link status */
49842- lmc_hssi_set_crc_length, /* set CRC length */
49843- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49844- lmc_hssi_watchdog
49845+ .init = lmc_hssi_init, /* special media init stuff */
49846+ .defaults = lmc_hssi_default, /* reset to default state */
49847+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49848+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49849+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49850+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49851+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49852+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49853+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49854+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49855+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49856+ .watchdog = lmc_hssi_watchdog
49857 };
49858
49859-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49860- lmc_ssi_default, /* reset to default state */
49861- lmc_ssi_set_status, /* reset status to state provided */
49862- lmc_ssi_set_clock, /* set clock source */
49863- lmc_ssi_set_speed, /* set line speed */
49864- lmc_dummy_set_1, /* set cable length */
49865- lmc_dummy_set_1, /* set scrambler */
49866- lmc_ssi_get_link_status, /* get link status */
49867- lmc_ssi_set_link_status, /* set link status */
49868- lmc_ssi_set_crc_length, /* set CRC length */
49869- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49870- lmc_ssi_watchdog
49871+lmc_media_t lmc_ssi_media = {
49872+ .init = lmc_ssi_init, /* special media init stuff */
49873+ .defaults = lmc_ssi_default, /* reset to default state */
49874+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49875+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49876+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49877+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49878+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49879+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49880+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49881+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49882+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49883+ .watchdog = lmc_ssi_watchdog
49884 };
49885
49886 lmc_media_t lmc_t1_media = {
49887- lmc_t1_init, /* special media init stuff */
49888- lmc_t1_default, /* reset to default state */
49889- lmc_t1_set_status, /* reset status to state provided */
49890- lmc_t1_set_clock, /* set clock source */
49891- lmc_dummy_set2_1, /* set line speed */
49892- lmc_dummy_set_1, /* set cable length */
49893- lmc_dummy_set_1, /* set scrambler */
49894- lmc_t1_get_link_status, /* get link status */
49895- lmc_dummy_set_1, /* set link status */
49896- lmc_t1_set_crc_length, /* set CRC length */
49897- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49898- lmc_t1_watchdog
49899+ .init = lmc_t1_init, /* special media init stuff */
49900+ .defaults = lmc_t1_default, /* reset to default state */
49901+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49902+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49903+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49904+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49905+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49906+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49907+ .set_link_status = lmc_dummy_set_1, /* set link status */
49908+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49909+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49910+ .watchdog = lmc_t1_watchdog
49911 };
49912
49913 static void
49914diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49915index feacc3b..5bac0de 100644
49916--- a/drivers/net/wan/z85230.c
49917+++ b/drivers/net/wan/z85230.c
49918@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49919
49920 struct z8530_irqhandler z8530_sync =
49921 {
49922- z8530_rx,
49923- z8530_tx,
49924- z8530_status
49925+ .rx = z8530_rx,
49926+ .tx = z8530_tx,
49927+ .status = z8530_status
49928 };
49929
49930 EXPORT_SYMBOL(z8530_sync);
49931@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49932 }
49933
49934 static struct z8530_irqhandler z8530_dma_sync = {
49935- z8530_dma_rx,
49936- z8530_dma_tx,
49937- z8530_dma_status
49938+ .rx = z8530_dma_rx,
49939+ .tx = z8530_dma_tx,
49940+ .status = z8530_dma_status
49941 };
49942
49943 static struct z8530_irqhandler z8530_txdma_sync = {
49944- z8530_rx,
49945- z8530_dma_tx,
49946- z8530_dma_status
49947+ .rx = z8530_rx,
49948+ .tx = z8530_dma_tx,
49949+ .status = z8530_dma_status
49950 };
49951
49952 /**
49953@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49954
49955 struct z8530_irqhandler z8530_nop=
49956 {
49957- z8530_rx_clear,
49958- z8530_tx_clear,
49959- z8530_status_clear
49960+ .rx = z8530_rx_clear,
49961+ .tx = z8530_tx_clear,
49962+ .status = z8530_status_clear
49963 };
49964
49965
49966diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49967index 0b60295..b8bfa5b 100644
49968--- a/drivers/net/wimax/i2400m/rx.c
49969+++ b/drivers/net/wimax/i2400m/rx.c
49970@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49971 if (i2400m->rx_roq == NULL)
49972 goto error_roq_alloc;
49973
49974- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49975+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49976 GFP_KERNEL);
49977 if (rd == NULL) {
49978 result = -ENOMEM;
49979diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49980index e71a2ce..2268d61 100644
49981--- a/drivers/net/wireless/airo.c
49982+++ b/drivers/net/wireless/airo.c
49983@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49984 struct airo_info *ai = dev->ml_priv;
49985 int ridcode;
49986 int enabled;
49987- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49988+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49989 unsigned char *iobuf;
49990
49991 /* Only super-user can write RIDs */
49992diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49993index da92bfa..5a9001a 100644
49994--- a/drivers/net/wireless/at76c50x-usb.c
49995+++ b/drivers/net/wireless/at76c50x-usb.c
49996@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49997 }
49998
49999 /* Convert timeout from the DFU status to jiffies */
50000-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50001+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50002 {
50003 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50004 | (s->poll_timeout[1] << 8)
50005diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50006index 5fdc40d..3975205 100644
50007--- a/drivers/net/wireless/ath/ath10k/htc.c
50008+++ b/drivers/net/wireless/ath/ath10k/htc.c
50009@@ -856,7 +856,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50010 /* registered target arrival callback from the HIF layer */
50011 int ath10k_htc_init(struct ath10k *ar)
50012 {
50013- struct ath10k_hif_cb htc_callbacks;
50014+ static struct ath10k_hif_cb htc_callbacks = {
50015+ .rx_completion = ath10k_htc_rx_completion_handler,
50016+ .tx_completion = ath10k_htc_tx_completion_handler,
50017+ };
50018 struct ath10k_htc_ep *ep = NULL;
50019 struct ath10k_htc *htc = &ar->htc;
50020
50021@@ -866,8 +869,6 @@ int ath10k_htc_init(struct ath10k *ar)
50022 ath10k_htc_reset_endpoint_states(htc);
50023
50024 /* setup HIF layer callbacks */
50025- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50026- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50027 htc->ar = ar;
50028
50029 /* Get HIF default pipe for HTC message exchange */
50030diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50031index 4716d33..a688310 100644
50032--- a/drivers/net/wireless/ath/ath10k/htc.h
50033+++ b/drivers/net/wireless/ath/ath10k/htc.h
50034@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50035
50036 struct ath10k_htc_ops {
50037 void (*target_send_suspend_complete)(struct ath10k *ar);
50038-};
50039+} __no_const;
50040
50041 struct ath10k_htc_ep_ops {
50042 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50043 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50044 void (*ep_tx_credits)(struct ath10k *);
50045-};
50046+} __no_const;
50047
50048 /* service connection information */
50049 struct ath10k_htc_svc_conn_req {
50050diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50051index 59af9f9..5f3564f 100644
50052--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50053+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50054@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50055 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50056 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50057
50058- ACCESS_ONCE(ads->ds_link) = i->link;
50059- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50060+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50061+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50062
50063 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50064 ctl6 = SM(i->keytype, AR_EncrType);
50065@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50066
50067 if ((i->is_first || i->is_last) &&
50068 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50069- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50070+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50071 | set11nTries(i->rates, 1)
50072 | set11nTries(i->rates, 2)
50073 | set11nTries(i->rates, 3)
50074 | (i->dur_update ? AR_DurUpdateEna : 0)
50075 | SM(0, AR_BurstDur);
50076
50077- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50078+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50079 | set11nRate(i->rates, 1)
50080 | set11nRate(i->rates, 2)
50081 | set11nRate(i->rates, 3);
50082 } else {
50083- ACCESS_ONCE(ads->ds_ctl2) = 0;
50084- ACCESS_ONCE(ads->ds_ctl3) = 0;
50085+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50086+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50087 }
50088
50089 if (!i->is_first) {
50090- ACCESS_ONCE(ads->ds_ctl0) = 0;
50091- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50092- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50093+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50094+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50095+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50096 return;
50097 }
50098
50099@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50100 break;
50101 }
50102
50103- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50104+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50105 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50106 | SM(i->txpower, AR_XmitPower0)
50107 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50108@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50109 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50110 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50111
50112- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50113- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50114+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50115+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50116
50117 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50118 return;
50119
50120- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50121+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50122 | set11nPktDurRTSCTS(i->rates, 1);
50123
50124- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50125+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50126 | set11nPktDurRTSCTS(i->rates, 3);
50127
50128- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50129+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50130 | set11nRateFlags(i->rates, 1)
50131 | set11nRateFlags(i->rates, 2)
50132 | set11nRateFlags(i->rates, 3)
50133 | SM(i->rtscts_rate, AR_RTSCTSRate);
50134
50135- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
50136- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
50137- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
50138+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
50139+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
50140+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
50141 }
50142
50143 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
50144diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50145index 71e38e8..5ac96ca 100644
50146--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50147+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50148@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50149 (i->qcu << AR_TxQcuNum_S) | desc_len;
50150
50151 checksum += val;
50152- ACCESS_ONCE(ads->info) = val;
50153+ ACCESS_ONCE_RW(ads->info) = val;
50154
50155 checksum += i->link;
50156- ACCESS_ONCE(ads->link) = i->link;
50157+ ACCESS_ONCE_RW(ads->link) = i->link;
50158
50159 checksum += i->buf_addr[0];
50160- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50161+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50162 checksum += i->buf_addr[1];
50163- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50164+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50165 checksum += i->buf_addr[2];
50166- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50167+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50168 checksum += i->buf_addr[3];
50169- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50170+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50171
50172 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50173- ACCESS_ONCE(ads->ctl3) = val;
50174+ ACCESS_ONCE_RW(ads->ctl3) = val;
50175 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50176- ACCESS_ONCE(ads->ctl5) = val;
50177+ ACCESS_ONCE_RW(ads->ctl5) = val;
50178 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50179- ACCESS_ONCE(ads->ctl7) = val;
50180+ ACCESS_ONCE_RW(ads->ctl7) = val;
50181 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50182- ACCESS_ONCE(ads->ctl9) = val;
50183+ ACCESS_ONCE_RW(ads->ctl9) = val;
50184
50185 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50186- ACCESS_ONCE(ads->ctl10) = checksum;
50187+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50188
50189 if (i->is_first || i->is_last) {
50190- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50191+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50192 | set11nTries(i->rates, 1)
50193 | set11nTries(i->rates, 2)
50194 | set11nTries(i->rates, 3)
50195 | (i->dur_update ? AR_DurUpdateEna : 0)
50196 | SM(0, AR_BurstDur);
50197
50198- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50199+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50200 | set11nRate(i->rates, 1)
50201 | set11nRate(i->rates, 2)
50202 | set11nRate(i->rates, 3);
50203 } else {
50204- ACCESS_ONCE(ads->ctl13) = 0;
50205- ACCESS_ONCE(ads->ctl14) = 0;
50206+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50207+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50208 }
50209
50210 ads->ctl20 = 0;
50211@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50212
50213 ctl17 = SM(i->keytype, AR_EncrType);
50214 if (!i->is_first) {
50215- ACCESS_ONCE(ads->ctl11) = 0;
50216- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50217- ACCESS_ONCE(ads->ctl15) = 0;
50218- ACCESS_ONCE(ads->ctl16) = 0;
50219- ACCESS_ONCE(ads->ctl17) = ctl17;
50220- ACCESS_ONCE(ads->ctl18) = 0;
50221- ACCESS_ONCE(ads->ctl19) = 0;
50222+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50223+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50224+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50225+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50226+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50227+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50228+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50229 return;
50230 }
50231
50232- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50233+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50234 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50235 | SM(i->txpower, AR_XmitPower0)
50236 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50237@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50238 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50239 ctl12 |= SM(val, AR_PAPRDChainMask);
50240
50241- ACCESS_ONCE(ads->ctl12) = ctl12;
50242- ACCESS_ONCE(ads->ctl17) = ctl17;
50243+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50244+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50245
50246- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50247+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50248 | set11nPktDurRTSCTS(i->rates, 1);
50249
50250- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50251+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50252 | set11nPktDurRTSCTS(i->rates, 3);
50253
50254- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50255+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50256 | set11nRateFlags(i->rates, 1)
50257 | set11nRateFlags(i->rates, 2)
50258 | set11nRateFlags(i->rates, 3)
50259 | SM(i->rtscts_rate, AR_RTSCTSRate);
50260
50261- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50262+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50263
50264- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
50265- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
50266- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
50267+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
50268+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
50269+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
50270 }
50271
50272 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50273diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50274index 51b4ebe..d1929dd 100644
50275--- a/drivers/net/wireless/ath/ath9k/hw.h
50276+++ b/drivers/net/wireless/ath/ath9k/hw.h
50277@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50278
50279 /* ANI */
50280 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50281-};
50282+} __no_const;
50283
50284 /**
50285 * struct ath_spec_scan - parameters for Atheros spectral scan
50286@@ -706,7 +706,7 @@ struct ath_hw_ops {
50287 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50288 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50289 #endif
50290-};
50291+} __no_const;
50292
50293 struct ath_nf_limits {
50294 s16 max;
50295diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50296index 4b148bb..ac738fa 100644
50297--- a/drivers/net/wireless/ath/ath9k/main.c
50298+++ b/drivers/net/wireless/ath/ath9k/main.c
50299@@ -2592,16 +2592,18 @@ void ath9k_fill_chanctx_ops(void)
50300 if (!ath9k_use_chanctx)
50301 return;
50302
50303- ath9k_ops.hw_scan = ath9k_hw_scan;
50304- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50305- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50306- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50307- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50308- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50309- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50310- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50311- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50312- ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
50313+ pax_open_kernel();
50314+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50315+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50316+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50317+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50318+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50319+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50320+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50321+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50322+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50323+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
50324+ pax_close_kernel();
50325 }
50326
50327 struct ieee80211_ops ath9k_ops = {
50328diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50329index 92190da..f3a4c4c 100644
50330--- a/drivers/net/wireless/b43/phy_lp.c
50331+++ b/drivers/net/wireless/b43/phy_lp.c
50332@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50333 {
50334 struct ssb_bus *bus = dev->dev->sdev->bus;
50335
50336- static const struct b206x_channel *chandata = NULL;
50337+ const struct b206x_channel *chandata = NULL;
50338 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50339 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50340 u16 old_comm15, scale;
50341diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50342index dc1d20c..f7a4f06 100644
50343--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50344+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50345@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50346 */
50347 if (il3945_mod_params.disable_hw_scan) {
50348 D_INFO("Disabling hw_scan\n");
50349- il3945_mac_ops.hw_scan = NULL;
50350+ pax_open_kernel();
50351+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50352+ pax_close_kernel();
50353 }
50354
50355 D_INFO("*** LOAD DRIVER ***\n");
50356diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50357index 0ffb6ff..c0b7f0e 100644
50358--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50359+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50360@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50361 {
50362 struct iwl_priv *priv = file->private_data;
50363 char buf[64];
50364- int buf_size;
50365+ size_t buf_size;
50366 u32 offset, len;
50367
50368 memset(buf, 0, sizeof(buf));
50369@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50370 struct iwl_priv *priv = file->private_data;
50371
50372 char buf[8];
50373- int buf_size;
50374+ size_t buf_size;
50375 u32 reset_flag;
50376
50377 memset(buf, 0, sizeof(buf));
50378@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50379 {
50380 struct iwl_priv *priv = file->private_data;
50381 char buf[8];
50382- int buf_size;
50383+ size_t buf_size;
50384 int ht40;
50385
50386 memset(buf, 0, sizeof(buf));
50387@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50388 {
50389 struct iwl_priv *priv = file->private_data;
50390 char buf[8];
50391- int buf_size;
50392+ size_t buf_size;
50393 int value;
50394
50395 memset(buf, 0, sizeof(buf));
50396@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50397 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50398 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50399
50400-static const char *fmt_value = " %-30s %10u\n";
50401-static const char *fmt_hex = " %-30s 0x%02X\n";
50402-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50403-static const char *fmt_header =
50404+static const char fmt_value[] = " %-30s %10u\n";
50405+static const char fmt_hex[] = " %-30s 0x%02X\n";
50406+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50407+static const char fmt_header[] =
50408 "%-32s current cumulative delta max\n";
50409
50410 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50411@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50412 {
50413 struct iwl_priv *priv = file->private_data;
50414 char buf[8];
50415- int buf_size;
50416+ size_t buf_size;
50417 int clear;
50418
50419 memset(buf, 0, sizeof(buf));
50420@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50421 {
50422 struct iwl_priv *priv = file->private_data;
50423 char buf[8];
50424- int buf_size;
50425+ size_t buf_size;
50426 int trace;
50427
50428 memset(buf, 0, sizeof(buf));
50429@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50430 {
50431 struct iwl_priv *priv = file->private_data;
50432 char buf[8];
50433- int buf_size;
50434+ size_t buf_size;
50435 int missed;
50436
50437 memset(buf, 0, sizeof(buf));
50438@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50439
50440 struct iwl_priv *priv = file->private_data;
50441 char buf[8];
50442- int buf_size;
50443+ size_t buf_size;
50444 int plcp;
50445
50446 memset(buf, 0, sizeof(buf));
50447@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50448
50449 struct iwl_priv *priv = file->private_data;
50450 char buf[8];
50451- int buf_size;
50452+ size_t buf_size;
50453 int flush;
50454
50455 memset(buf, 0, sizeof(buf));
50456@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50457
50458 struct iwl_priv *priv = file->private_data;
50459 char buf[8];
50460- int buf_size;
50461+ size_t buf_size;
50462 int rts;
50463
50464 if (!priv->cfg->ht_params)
50465@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50466 {
50467 struct iwl_priv *priv = file->private_data;
50468 char buf[8];
50469- int buf_size;
50470+ size_t buf_size;
50471
50472 memset(buf, 0, sizeof(buf));
50473 buf_size = min(count, sizeof(buf) - 1);
50474@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50475 struct iwl_priv *priv = file->private_data;
50476 u32 event_log_flag;
50477 char buf[8];
50478- int buf_size;
50479+ size_t buf_size;
50480
50481 /* check that the interface is up */
50482 if (!iwl_is_ready(priv))
50483@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50484 struct iwl_priv *priv = file->private_data;
50485 char buf[8];
50486 u32 calib_disabled;
50487- int buf_size;
50488+ size_t buf_size;
50489
50490 memset(buf, 0, sizeof(buf));
50491 buf_size = min(count, sizeof(buf) - 1);
50492diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50493index 06e04aa..d5e1f0d 100644
50494--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50495+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50496@@ -1684,7 +1684,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50497 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50498
50499 char buf[8];
50500- int buf_size;
50501+ size_t buf_size;
50502 u32 reset_flag;
50503
50504 memset(buf, 0, sizeof(buf));
50505@@ -1705,7 +1705,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50506 {
50507 struct iwl_trans *trans = file->private_data;
50508 char buf[8];
50509- int buf_size;
50510+ size_t buf_size;
50511 int csr;
50512
50513 memset(buf, 0, sizeof(buf));
50514diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50515index 1326f61..9e56010f 100644
50516--- a/drivers/net/wireless/mac80211_hwsim.c
50517+++ b/drivers/net/wireless/mac80211_hwsim.c
50518@@ -2575,20 +2575,20 @@ static int __init init_mac80211_hwsim(void)
50519 if (channels < 1)
50520 return -EINVAL;
50521
50522- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50523- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50524- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50525- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50526- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50527- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50528- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50529- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50530- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50531- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50532- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50533- mac80211_hwsim_assign_vif_chanctx;
50534- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50535- mac80211_hwsim_unassign_vif_chanctx;
50536+ pax_open_kernel();
50537+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50538+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50539+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50540+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50541+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50542+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50543+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50544+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50545+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50546+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50547+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50548+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50549+ pax_close_kernel();
50550
50551 spin_lock_init(&hwsim_radio_lock);
50552 INIT_LIST_HEAD(&hwsim_radios);
50553diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50554index d2a9a08..0cb175d 100644
50555--- a/drivers/net/wireless/rndis_wlan.c
50556+++ b/drivers/net/wireless/rndis_wlan.c
50557@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50558
50559 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50560
50561- if (rts_threshold < 0 || rts_threshold > 2347)
50562+ if (rts_threshold > 2347)
50563 rts_threshold = 2347;
50564
50565 tmp = cpu_to_le32(rts_threshold);
50566diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50567index d13f25c..2573994 100644
50568--- a/drivers/net/wireless/rt2x00/rt2x00.h
50569+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50570@@ -375,7 +375,7 @@ struct rt2x00_intf {
50571 * for hardware which doesn't support hardware
50572 * sequence counting.
50573 */
50574- atomic_t seqno;
50575+ atomic_unchecked_t seqno;
50576 };
50577
50578 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50579diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50580index 8e68f87..c35ba29 100644
50581--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50582+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50583@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50584 * sequence counter given by mac80211.
50585 */
50586 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50587- seqno = atomic_add_return(0x10, &intf->seqno);
50588+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50589 else
50590- seqno = atomic_read(&intf->seqno);
50591+ seqno = atomic_read_unchecked(&intf->seqno);
50592
50593 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50594 hdr->seq_ctrl |= cpu_to_le16(seqno);
50595diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50596index b661f896..ddf7d2b 100644
50597--- a/drivers/net/wireless/ti/wl1251/sdio.c
50598+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50599@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50600
50601 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50602
50603- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50604- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50605+ pax_open_kernel();
50606+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50607+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50608+ pax_close_kernel();
50609
50610 wl1251_info("using dedicated interrupt line");
50611 } else {
50612- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50613- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50614+ pax_open_kernel();
50615+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50616+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50617+ pax_close_kernel();
50618
50619 wl1251_info("using SDIO interrupt");
50620 }
50621diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50622index 0bccf12..3d95068 100644
50623--- a/drivers/net/wireless/ti/wl12xx/main.c
50624+++ b/drivers/net/wireless/ti/wl12xx/main.c
50625@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50626 sizeof(wl->conf.mem));
50627
50628 /* read data preparation is only needed by wl127x */
50629- wl->ops->prepare_read = wl127x_prepare_read;
50630+ pax_open_kernel();
50631+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50632+ pax_close_kernel();
50633
50634 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50635 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50636@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50637 sizeof(wl->conf.mem));
50638
50639 /* read data preparation is only needed by wl127x */
50640- wl->ops->prepare_read = wl127x_prepare_read;
50641+ pax_open_kernel();
50642+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50643+ pax_close_kernel();
50644
50645 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50646 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50647diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50648index 7af1936..128bb35 100644
50649--- a/drivers/net/wireless/ti/wl18xx/main.c
50650+++ b/drivers/net/wireless/ti/wl18xx/main.c
50651@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50652 }
50653
50654 if (!checksum_param) {
50655- wl18xx_ops.set_rx_csum = NULL;
50656- wl18xx_ops.init_vif = NULL;
50657+ pax_open_kernel();
50658+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50659+ *(void **)&wl18xx_ops.init_vif = NULL;
50660+ pax_close_kernel();
50661 }
50662
50663 /* Enable 11a Band only if we have 5G antennas */
50664diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50665index a912dc0..a8225ba 100644
50666--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50667+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50668@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50669 {
50670 struct zd_usb *usb = urb->context;
50671 struct zd_usb_interrupt *intr = &usb->intr;
50672- int len;
50673+ unsigned int len;
50674 u16 int_num;
50675
50676 ZD_ASSERT(in_interrupt());
50677diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50678index 683671a..4519fc2 100644
50679--- a/drivers/nfc/nfcwilink.c
50680+++ b/drivers/nfc/nfcwilink.c
50681@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50682
50683 static int nfcwilink_probe(struct platform_device *pdev)
50684 {
50685- static struct nfcwilink *drv;
50686+ struct nfcwilink *drv;
50687 int rc;
50688 __u32 protocols;
50689
50690diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50691index d93b2b6..ae50401 100644
50692--- a/drivers/oprofile/buffer_sync.c
50693+++ b/drivers/oprofile/buffer_sync.c
50694@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50695 if (cookie == NO_COOKIE)
50696 offset = pc;
50697 if (cookie == INVALID_COOKIE) {
50698- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50699+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50700 offset = pc;
50701 }
50702 if (cookie != last_cookie) {
50703@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50704 /* add userspace sample */
50705
50706 if (!mm) {
50707- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50708+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50709 return 0;
50710 }
50711
50712 cookie = lookup_dcookie(mm, s->eip, &offset);
50713
50714 if (cookie == INVALID_COOKIE) {
50715- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50716+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50717 return 0;
50718 }
50719
50720@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50721 /* ignore backtraces if failed to add a sample */
50722 if (state == sb_bt_start) {
50723 state = sb_bt_ignore;
50724- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50725+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50726 }
50727 }
50728 release_mm(mm);
50729diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50730index c0cc4e7..44d4e54 100644
50731--- a/drivers/oprofile/event_buffer.c
50732+++ b/drivers/oprofile/event_buffer.c
50733@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50734 }
50735
50736 if (buffer_pos == buffer_size) {
50737- atomic_inc(&oprofile_stats.event_lost_overflow);
50738+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50739 return;
50740 }
50741
50742diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50743index ed2c3ec..deda85a 100644
50744--- a/drivers/oprofile/oprof.c
50745+++ b/drivers/oprofile/oprof.c
50746@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50747 if (oprofile_ops.switch_events())
50748 return;
50749
50750- atomic_inc(&oprofile_stats.multiplex_counter);
50751+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50752 start_switch_worker();
50753 }
50754
50755diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50756index ee2cfce..7f8f699 100644
50757--- a/drivers/oprofile/oprofile_files.c
50758+++ b/drivers/oprofile/oprofile_files.c
50759@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50760
50761 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50762
50763-static ssize_t timeout_read(struct file *file, char __user *buf,
50764+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50765 size_t count, loff_t *offset)
50766 {
50767 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50768diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50769index 59659ce..6c860a0 100644
50770--- a/drivers/oprofile/oprofile_stats.c
50771+++ b/drivers/oprofile/oprofile_stats.c
50772@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50773 cpu_buf->sample_invalid_eip = 0;
50774 }
50775
50776- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50777- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50778- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50779- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50780- atomic_set(&oprofile_stats.multiplex_counter, 0);
50781+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50782+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50783+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50784+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50785+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50786 }
50787
50788
50789diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50790index 1fc622b..8c48fc3 100644
50791--- a/drivers/oprofile/oprofile_stats.h
50792+++ b/drivers/oprofile/oprofile_stats.h
50793@@ -13,11 +13,11 @@
50794 #include <linux/atomic.h>
50795
50796 struct oprofile_stat_struct {
50797- atomic_t sample_lost_no_mm;
50798- atomic_t sample_lost_no_mapping;
50799- atomic_t bt_lost_no_mapping;
50800- atomic_t event_lost_overflow;
50801- atomic_t multiplex_counter;
50802+ atomic_unchecked_t sample_lost_no_mm;
50803+ atomic_unchecked_t sample_lost_no_mapping;
50804+ atomic_unchecked_t bt_lost_no_mapping;
50805+ atomic_unchecked_t event_lost_overflow;
50806+ atomic_unchecked_t multiplex_counter;
50807 };
50808
50809 extern struct oprofile_stat_struct oprofile_stats;
50810diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50811index 3f49345..c750d0b 100644
50812--- a/drivers/oprofile/oprofilefs.c
50813+++ b/drivers/oprofile/oprofilefs.c
50814@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50815
50816 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50817 {
50818- atomic_t *val = file->private_data;
50819- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50820+ atomic_unchecked_t *val = file->private_data;
50821+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50822 }
50823
50824
50825@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50826
50827
50828 int oprofilefs_create_ro_atomic(struct dentry *root,
50829- char const *name, atomic_t *val)
50830+ char const *name, atomic_unchecked_t *val)
50831 {
50832 return __oprofilefs_create_file(root, name,
50833 &atomic_ro_fops, 0444, val);
50834diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50835index 61be1d9..dec05d7 100644
50836--- a/drivers/oprofile/timer_int.c
50837+++ b/drivers/oprofile/timer_int.c
50838@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50839 return NOTIFY_OK;
50840 }
50841
50842-static struct notifier_block __refdata oprofile_cpu_notifier = {
50843+static struct notifier_block oprofile_cpu_notifier = {
50844 .notifier_call = oprofile_cpu_notify,
50845 };
50846
50847diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50848index 3b47080..6cd05dd 100644
50849--- a/drivers/parport/procfs.c
50850+++ b/drivers/parport/procfs.c
50851@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50852
50853 *ppos += len;
50854
50855- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50856+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50857 }
50858
50859 #ifdef CONFIG_PARPORT_1284
50860@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50861
50862 *ppos += len;
50863
50864- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50865+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50866 }
50867 #endif /* IEEE1284.3 support. */
50868
50869diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50870index 8dcccff..35d701d 100644
50871--- a/drivers/pci/hotplug/acpiphp_ibm.c
50872+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50873@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50874 goto init_cleanup;
50875 }
50876
50877- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50878+ pax_open_kernel();
50879+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50880+ pax_close_kernel();
50881 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50882
50883 return retval;
50884diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50885index 04fcd78..39e83f1 100644
50886--- a/drivers/pci/hotplug/cpcihp_generic.c
50887+++ b/drivers/pci/hotplug/cpcihp_generic.c
50888@@ -73,7 +73,6 @@ static u16 port;
50889 static unsigned int enum_bit;
50890 static u8 enum_mask;
50891
50892-static struct cpci_hp_controller_ops generic_hpc_ops;
50893 static struct cpci_hp_controller generic_hpc;
50894
50895 static int __init validate_parameters(void)
50896@@ -139,6 +138,10 @@ static int query_enum(void)
50897 return ((value & enum_mask) == enum_mask);
50898 }
50899
50900+static struct cpci_hp_controller_ops generic_hpc_ops = {
50901+ .query_enum = query_enum,
50902+};
50903+
50904 static int __init cpcihp_generic_init(void)
50905 {
50906 int status;
50907@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50908 pci_dev_put(dev);
50909
50910 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50911- generic_hpc_ops.query_enum = query_enum;
50912 generic_hpc.ops = &generic_hpc_ops;
50913
50914 status = cpci_hp_register_controller(&generic_hpc);
50915diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50916index 6757b3e..d3bad62 100644
50917--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50918+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50919@@ -59,7 +59,6 @@
50920 /* local variables */
50921 static bool debug;
50922 static bool poll;
50923-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50924 static struct cpci_hp_controller zt5550_hpc;
50925
50926 /* Primary cPCI bus bridge device */
50927@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
50928 return 0;
50929 }
50930
50931+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50932+ .query_enum = zt5550_hc_query_enum,
50933+};
50934+
50935 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50936 {
50937 int status;
50938@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50939 dbg("returned from zt5550_hc_config");
50940
50941 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50942- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50943 zt5550_hpc.ops = &zt5550_hpc_ops;
50944 if(!poll) {
50945 zt5550_hpc.irq = hc_dev->irq;
50946 zt5550_hpc.irq_flags = IRQF_SHARED;
50947 zt5550_hpc.dev_id = hc_dev;
50948
50949- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50950- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50951- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50952+ pax_open_kernel();
50953+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50954+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50955+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50956+ pax_open_kernel();
50957 } else {
50958 info("using ENUM# polling mode");
50959 }
50960diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50961index 0968a9b..5a00edf 100644
50962--- a/drivers/pci/hotplug/cpqphp_nvram.c
50963+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50964@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
50965
50966 void compaq_nvram_init (void __iomem *rom_start)
50967 {
50968+
50969+#ifndef CONFIG_PAX_KERNEXEC
50970 if (rom_start) {
50971 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50972 }
50973+#endif
50974+
50975 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50976
50977 /* initialize our int15 lock */
50978diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50979index 56d8486..f26113f 100644
50980--- a/drivers/pci/hotplug/pci_hotplug_core.c
50981+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50982@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50983 return -EINVAL;
50984 }
50985
50986- slot->ops->owner = owner;
50987- slot->ops->mod_name = mod_name;
50988+ pax_open_kernel();
50989+ *(struct module **)&slot->ops->owner = owner;
50990+ *(const char **)&slot->ops->mod_name = mod_name;
50991+ pax_close_kernel();
50992
50993 mutex_lock(&pci_hp_mutex);
50994 /*
50995diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50996index 07aa722..84514b4 100644
50997--- a/drivers/pci/hotplug/pciehp_core.c
50998+++ b/drivers/pci/hotplug/pciehp_core.c
50999@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51000 struct slot *slot = ctrl->slot;
51001 struct hotplug_slot *hotplug = NULL;
51002 struct hotplug_slot_info *info = NULL;
51003- struct hotplug_slot_ops *ops = NULL;
51004+ hotplug_slot_ops_no_const *ops = NULL;
51005 char name[SLOT_NAME_SIZE];
51006 int retval = -ENOMEM;
51007
51008diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51009index 5a40516..136d5a7 100644
51010--- a/drivers/pci/msi.c
51011+++ b/drivers/pci/msi.c
51012@@ -507,8 +507,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51013 {
51014 struct attribute **msi_attrs;
51015 struct attribute *msi_attr;
51016- struct device_attribute *msi_dev_attr;
51017- struct attribute_group *msi_irq_group;
51018+ device_attribute_no_const *msi_dev_attr;
51019+ attribute_group_no_const *msi_irq_group;
51020 const struct attribute_group **msi_irq_groups;
51021 struct msi_desc *entry;
51022 int ret = -ENOMEM;
51023@@ -568,7 +568,7 @@ error_attrs:
51024 count = 0;
51025 msi_attr = msi_attrs[count];
51026 while (msi_attr) {
51027- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51028+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51029 kfree(msi_attr->name);
51030 kfree(msi_dev_attr);
51031 ++count;
51032diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51033index 76ef791..adc3bd1 100644
51034--- a/drivers/pci/pci-sysfs.c
51035+++ b/drivers/pci/pci-sysfs.c
51036@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51037 {
51038 /* allocate attribute structure, piggyback attribute name */
51039 int name_len = write_combine ? 13 : 10;
51040- struct bin_attribute *res_attr;
51041+ bin_attribute_no_const *res_attr;
51042 int retval;
51043
51044 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51045@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51046 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51047 {
51048 int retval;
51049- struct bin_attribute *attr;
51050+ bin_attribute_no_const *attr;
51051
51052 /* If the device has VPD, try to expose it in sysfs. */
51053 if (dev->vpd) {
51054@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51055 {
51056 int retval;
51057 int rom_size = 0;
51058- struct bin_attribute *attr;
51059+ bin_attribute_no_const *attr;
51060
51061 if (!sysfs_initialized)
51062 return -EACCES;
51063diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51064index 0601890..dc15007 100644
51065--- a/drivers/pci/pci.h
51066+++ b/drivers/pci/pci.h
51067@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51068 struct pci_vpd {
51069 unsigned int len;
51070 const struct pci_vpd_ops *ops;
51071- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51072+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51073 };
51074
51075 int pci_vpd_pci22_init(struct pci_dev *dev);
51076diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51077index e1e7026..d28dd33 100644
51078--- a/drivers/pci/pcie/aspm.c
51079+++ b/drivers/pci/pcie/aspm.c
51080@@ -27,9 +27,9 @@
51081 #define MODULE_PARAM_PREFIX "pcie_aspm."
51082
51083 /* Note: those are not register definitions */
51084-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51085-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51086-#define ASPM_STATE_L1 (4) /* L1 state */
51087+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51088+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51089+#define ASPM_STATE_L1 (4U) /* L1 state */
51090 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51091 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51092
51093diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51094index 4170113..7cc5339 100644
51095--- a/drivers/pci/probe.c
51096+++ b/drivers/pci/probe.c
51097@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51098 struct pci_bus_region region, inverted_region;
51099 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51100
51101- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51102+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51103
51104 /* No printks while decoding is disabled! */
51105 if (!dev->mmio_always_on) {
51106diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51107index 3f155e7..0f4b1f0 100644
51108--- a/drivers/pci/proc.c
51109+++ b/drivers/pci/proc.c
51110@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51111 static int __init pci_proc_init(void)
51112 {
51113 struct pci_dev *dev = NULL;
51114+
51115+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51116+#ifdef CONFIG_GRKERNSEC_PROC_USER
51117+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51118+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51119+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51120+#endif
51121+#else
51122 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51123+#endif
51124 proc_create("devices", 0, proc_bus_pci_dir,
51125 &proc_bus_pci_dev_operations);
51126 proc_initialized = 1;
51127diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51128index d866db8..c827d1f 100644
51129--- a/drivers/platform/chrome/chromeos_laptop.c
51130+++ b/drivers/platform/chrome/chromeos_laptop.c
51131@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
51132 .callback = chromeos_laptop_dmi_matched, \
51133 .driver_data = (void *)&board_
51134
51135-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51136+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51137 {
51138 .ident = "Samsung Series 5 550",
51139 .matches = {
51140diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51141index c5af23b..3d62d5e 100644
51142--- a/drivers/platform/x86/alienware-wmi.c
51143+++ b/drivers/platform/x86/alienware-wmi.c
51144@@ -150,7 +150,7 @@ struct wmax_led_args {
51145 } __packed;
51146
51147 static struct platform_device *platform_device;
51148-static struct device_attribute *zone_dev_attrs;
51149+static device_attribute_no_const *zone_dev_attrs;
51150 static struct attribute **zone_attrs;
51151 static struct platform_zone *zone_data;
51152
51153@@ -161,7 +161,7 @@ static struct platform_driver platform_driver = {
51154 }
51155 };
51156
51157-static struct attribute_group zone_attribute_group = {
51158+static attribute_group_no_const zone_attribute_group = {
51159 .name = "rgb_zones",
51160 };
51161
51162diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51163index 21fc932..ee9394a 100644
51164--- a/drivers/platform/x86/asus-wmi.c
51165+++ b/drivers/platform/x86/asus-wmi.c
51166@@ -1590,6 +1590,10 @@ static int show_dsts(struct seq_file *m, void *data)
51167 int err;
51168 u32 retval = -1;
51169
51170+#ifdef CONFIG_GRKERNSEC_KMEM
51171+ return -EPERM;
51172+#endif
51173+
51174 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51175
51176 if (err < 0)
51177@@ -1606,6 +1610,10 @@ static int show_devs(struct seq_file *m, void *data)
51178 int err;
51179 u32 retval = -1;
51180
51181+#ifdef CONFIG_GRKERNSEC_KMEM
51182+ return -EPERM;
51183+#endif
51184+
51185 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51186 &retval);
51187
51188@@ -1630,6 +1638,10 @@ static int show_call(struct seq_file *m, void *data)
51189 union acpi_object *obj;
51190 acpi_status status;
51191
51192+#ifdef CONFIG_GRKERNSEC_KMEM
51193+ return -EPERM;
51194+#endif
51195+
51196 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51197 1, asus->debug.method_id,
51198 &input, &output);
51199diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51200index 62f8030..c7f2a45 100644
51201--- a/drivers/platform/x86/msi-laptop.c
51202+++ b/drivers/platform/x86/msi-laptop.c
51203@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51204
51205 if (!quirks->ec_read_only) {
51206 /* allow userland write sysfs file */
51207- dev_attr_bluetooth.store = store_bluetooth;
51208- dev_attr_wlan.store = store_wlan;
51209- dev_attr_threeg.store = store_threeg;
51210- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51211- dev_attr_wlan.attr.mode |= S_IWUSR;
51212- dev_attr_threeg.attr.mode |= S_IWUSR;
51213+ pax_open_kernel();
51214+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51215+ *(void **)&dev_attr_wlan.store = store_wlan;
51216+ *(void **)&dev_attr_threeg.store = store_threeg;
51217+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51218+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51219+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51220+ pax_close_kernel();
51221 }
51222
51223 /* disable hardware control by fn key */
51224diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51225index 70222f2..8c8ce66 100644
51226--- a/drivers/platform/x86/msi-wmi.c
51227+++ b/drivers/platform/x86/msi-wmi.c
51228@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51229 static void msi_wmi_notify(u32 value, void *context)
51230 {
51231 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51232- static struct key_entry *key;
51233+ struct key_entry *key;
51234 union acpi_object *obj;
51235 acpi_status status;
51236
51237diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51238index 26ad9ff..7c52909 100644
51239--- a/drivers/platform/x86/sony-laptop.c
51240+++ b/drivers/platform/x86/sony-laptop.c
51241@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51242 }
51243
51244 /* High speed charging function */
51245-static struct device_attribute *hsc_handle;
51246+static device_attribute_no_const *hsc_handle;
51247
51248 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51249 struct device_attribute *attr,
51250@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51251 }
51252
51253 /* low battery function */
51254-static struct device_attribute *lowbatt_handle;
51255+static device_attribute_no_const *lowbatt_handle;
51256
51257 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51258 struct device_attribute *attr,
51259@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51260 }
51261
51262 /* fan speed function */
51263-static struct device_attribute *fan_handle, *hsf_handle;
51264+static device_attribute_no_const *fan_handle, *hsf_handle;
51265
51266 static ssize_t sony_nc_hsfan_store(struct device *dev,
51267 struct device_attribute *attr,
51268@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51269 }
51270
51271 /* USB charge function */
51272-static struct device_attribute *uc_handle;
51273+static device_attribute_no_const *uc_handle;
51274
51275 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51276 struct device_attribute *attr,
51277@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51278 }
51279
51280 /* Panel ID function */
51281-static struct device_attribute *panel_handle;
51282+static device_attribute_no_const *panel_handle;
51283
51284 static ssize_t sony_nc_panelid_show(struct device *dev,
51285 struct device_attribute *attr, char *buffer)
51286@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51287 }
51288
51289 /* smart connect function */
51290-static struct device_attribute *sc_handle;
51291+static device_attribute_no_const *sc_handle;
51292
51293 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51294 struct device_attribute *attr,
51295diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51296index 3bbc6eb..7760460 100644
51297--- a/drivers/platform/x86/thinkpad_acpi.c
51298+++ b/drivers/platform/x86/thinkpad_acpi.c
51299@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51300 return 0;
51301 }
51302
51303-void static hotkey_mask_warn_incomplete_mask(void)
51304+static void hotkey_mask_warn_incomplete_mask(void)
51305 {
51306 /* log only what the user can fix... */
51307 const u32 wantedmask = hotkey_driver_mask &
51308@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51309 && !tp_features.bright_unkfw)
51310 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51311 }
51312+}
51313
51314 #undef TPACPI_COMPARE_KEY
51315 #undef TPACPI_MAY_SEND_KEY
51316-}
51317
51318 /*
51319 * Polling driver
51320diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51321index 438d4c7..ca8a2fb 100644
51322--- a/drivers/pnp/pnpbios/bioscalls.c
51323+++ b/drivers/pnp/pnpbios/bioscalls.c
51324@@ -59,7 +59,7 @@ do { \
51325 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51326 } while(0)
51327
51328-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51329+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51330 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51331
51332 /*
51333@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51334
51335 cpu = get_cpu();
51336 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51337+
51338+ pax_open_kernel();
51339 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51340+ pax_close_kernel();
51341
51342 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51343 spin_lock_irqsave(&pnp_bios_lock, flags);
51344@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51345 :"memory");
51346 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51347
51348+ pax_open_kernel();
51349 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51350+ pax_close_kernel();
51351+
51352 put_cpu();
51353
51354 /* If we get here and this is set then the PnP BIOS faulted on us. */
51355@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51356 return status;
51357 }
51358
51359-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51360+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51361 {
51362 int i;
51363
51364@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51365 pnp_bios_callpoint.offset = header->fields.pm16offset;
51366 pnp_bios_callpoint.segment = PNP_CS16;
51367
51368+ pax_open_kernel();
51369+
51370 for_each_possible_cpu(i) {
51371 struct desc_struct *gdt = get_cpu_gdt_table(i);
51372 if (!gdt)
51373@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51374 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51375 (unsigned long)__va(header->fields.pm16dseg));
51376 }
51377+
51378+ pax_close_kernel();
51379 }
51380diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51381index 0c52e2a..3421ab7 100644
51382--- a/drivers/power/pda_power.c
51383+++ b/drivers/power/pda_power.c
51384@@ -37,7 +37,11 @@ static int polling;
51385
51386 #if IS_ENABLED(CONFIG_USB_PHY)
51387 static struct usb_phy *transceiver;
51388-static struct notifier_block otg_nb;
51389+static int otg_handle_notification(struct notifier_block *nb,
51390+ unsigned long event, void *unused);
51391+static struct notifier_block otg_nb = {
51392+ .notifier_call = otg_handle_notification
51393+};
51394 #endif
51395
51396 static struct regulator *ac_draw;
51397@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51398
51399 #if IS_ENABLED(CONFIG_USB_PHY)
51400 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51401- otg_nb.notifier_call = otg_handle_notification;
51402 ret = usb_register_notifier(transceiver, &otg_nb);
51403 if (ret) {
51404 dev_err(dev, "failure to register otg notifier\n");
51405diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51406index cc439fd..8fa30df 100644
51407--- a/drivers/power/power_supply.h
51408+++ b/drivers/power/power_supply.h
51409@@ -16,12 +16,12 @@ struct power_supply;
51410
51411 #ifdef CONFIG_SYSFS
51412
51413-extern void power_supply_init_attrs(struct device_type *dev_type);
51414+extern void power_supply_init_attrs(void);
51415 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51416
51417 #else
51418
51419-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51420+static inline void power_supply_init_attrs(void) {}
51421 #define power_supply_uevent NULL
51422
51423 #endif /* CONFIG_SYSFS */
51424diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51425index 078afd6..fbac9da 100644
51426--- a/drivers/power/power_supply_core.c
51427+++ b/drivers/power/power_supply_core.c
51428@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51429 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51430 EXPORT_SYMBOL_GPL(power_supply_notifier);
51431
51432-static struct device_type power_supply_dev_type;
51433+extern const struct attribute_group *power_supply_attr_groups[];
51434+static struct device_type power_supply_dev_type = {
51435+ .groups = power_supply_attr_groups,
51436+};
51437
51438 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51439 struct power_supply *supply)
51440@@ -640,7 +643,7 @@ static int __init power_supply_class_init(void)
51441 return PTR_ERR(power_supply_class);
51442
51443 power_supply_class->dev_uevent = power_supply_uevent;
51444- power_supply_init_attrs(&power_supply_dev_type);
51445+ power_supply_init_attrs();
51446
51447 return 0;
51448 }
51449diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51450index 750a202..99c8f4b 100644
51451--- a/drivers/power/power_supply_sysfs.c
51452+++ b/drivers/power/power_supply_sysfs.c
51453@@ -234,17 +234,15 @@ static struct attribute_group power_supply_attr_group = {
51454 .is_visible = power_supply_attr_is_visible,
51455 };
51456
51457-static const struct attribute_group *power_supply_attr_groups[] = {
51458+const struct attribute_group *power_supply_attr_groups[] = {
51459 &power_supply_attr_group,
51460 NULL,
51461 };
51462
51463-void power_supply_init_attrs(struct device_type *dev_type)
51464+void power_supply_init_attrs(void)
51465 {
51466 int i;
51467
51468- dev_type->groups = power_supply_attr_groups;
51469-
51470 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51471 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51472 }
51473diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51474index 84419af..268ede8 100644
51475--- a/drivers/powercap/powercap_sys.c
51476+++ b/drivers/powercap/powercap_sys.c
51477@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51478 struct device_attribute name_attr;
51479 };
51480
51481+static ssize_t show_constraint_name(struct device *dev,
51482+ struct device_attribute *dev_attr,
51483+ char *buf);
51484+
51485 static struct powercap_constraint_attr
51486- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51487+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51488+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51489+ .power_limit_attr = {
51490+ .attr = {
51491+ .name = NULL,
51492+ .mode = S_IWUSR | S_IRUGO
51493+ },
51494+ .show = show_constraint_power_limit_uw,
51495+ .store = store_constraint_power_limit_uw
51496+ },
51497+
51498+ .time_window_attr = {
51499+ .attr = {
51500+ .name = NULL,
51501+ .mode = S_IWUSR | S_IRUGO
51502+ },
51503+ .show = show_constraint_time_window_us,
51504+ .store = store_constraint_time_window_us
51505+ },
51506+
51507+ .max_power_attr = {
51508+ .attr = {
51509+ .name = NULL,
51510+ .mode = S_IRUGO
51511+ },
51512+ .show = show_constraint_max_power_uw,
51513+ .store = NULL
51514+ },
51515+
51516+ .min_power_attr = {
51517+ .attr = {
51518+ .name = NULL,
51519+ .mode = S_IRUGO
51520+ },
51521+ .show = show_constraint_min_power_uw,
51522+ .store = NULL
51523+ },
51524+
51525+ .max_time_window_attr = {
51526+ .attr = {
51527+ .name = NULL,
51528+ .mode = S_IRUGO
51529+ },
51530+ .show = show_constraint_max_time_window_us,
51531+ .store = NULL
51532+ },
51533+
51534+ .min_time_window_attr = {
51535+ .attr = {
51536+ .name = NULL,
51537+ .mode = S_IRUGO
51538+ },
51539+ .show = show_constraint_min_time_window_us,
51540+ .store = NULL
51541+ },
51542+
51543+ .name_attr = {
51544+ .attr = {
51545+ .name = NULL,
51546+ .mode = S_IRUGO
51547+ },
51548+ .show = show_constraint_name,
51549+ .store = NULL
51550+ }
51551+ }
51552+};
51553
51554 /* A list of powercap control_types */
51555 static LIST_HEAD(powercap_cntrl_list);
51556@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51557 }
51558
51559 static int create_constraint_attribute(int id, const char *name,
51560- int mode,
51561- struct device_attribute *dev_attr,
51562- ssize_t (*show)(struct device *,
51563- struct device_attribute *, char *),
51564- ssize_t (*store)(struct device *,
51565- struct device_attribute *,
51566- const char *, size_t)
51567- )
51568+ struct device_attribute *dev_attr)
51569 {
51570+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51571
51572- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51573- id, name);
51574- if (!dev_attr->attr.name)
51575+ if (!name)
51576 return -ENOMEM;
51577- dev_attr->attr.mode = mode;
51578- dev_attr->show = show;
51579- dev_attr->store = store;
51580+
51581+ pax_open_kernel();
51582+ *(const char **)&dev_attr->attr.name = name;
51583+ pax_close_kernel();
51584
51585 return 0;
51586 }
51587@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51588
51589 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51590 ret = create_constraint_attribute(i, "power_limit_uw",
51591- S_IWUSR | S_IRUGO,
51592- &constraint_attrs[i].power_limit_attr,
51593- show_constraint_power_limit_uw,
51594- store_constraint_power_limit_uw);
51595+ &constraint_attrs[i].power_limit_attr);
51596 if (ret)
51597 goto err_alloc;
51598 ret = create_constraint_attribute(i, "time_window_us",
51599- S_IWUSR | S_IRUGO,
51600- &constraint_attrs[i].time_window_attr,
51601- show_constraint_time_window_us,
51602- store_constraint_time_window_us);
51603+ &constraint_attrs[i].time_window_attr);
51604 if (ret)
51605 goto err_alloc;
51606- ret = create_constraint_attribute(i, "name", S_IRUGO,
51607- &constraint_attrs[i].name_attr,
51608- show_constraint_name,
51609- NULL);
51610+ ret = create_constraint_attribute(i, "name",
51611+ &constraint_attrs[i].name_attr);
51612 if (ret)
51613 goto err_alloc;
51614- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51615- &constraint_attrs[i].max_power_attr,
51616- show_constraint_max_power_uw,
51617- NULL);
51618+ ret = create_constraint_attribute(i, "max_power_uw",
51619+ &constraint_attrs[i].max_power_attr);
51620 if (ret)
51621 goto err_alloc;
51622- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51623- &constraint_attrs[i].min_power_attr,
51624- show_constraint_min_power_uw,
51625- NULL);
51626+ ret = create_constraint_attribute(i, "min_power_uw",
51627+ &constraint_attrs[i].min_power_attr);
51628 if (ret)
51629 goto err_alloc;
51630 ret = create_constraint_attribute(i, "max_time_window_us",
51631- S_IRUGO,
51632- &constraint_attrs[i].max_time_window_attr,
51633- show_constraint_max_time_window_us,
51634- NULL);
51635+ &constraint_attrs[i].max_time_window_attr);
51636 if (ret)
51637 goto err_alloc;
51638 ret = create_constraint_attribute(i, "min_time_window_us",
51639- S_IRUGO,
51640- &constraint_attrs[i].min_time_window_attr,
51641- show_constraint_min_time_window_us,
51642- NULL);
51643+ &constraint_attrs[i].min_time_window_attr);
51644 if (ret)
51645 goto err_alloc;
51646
51647@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51648 power_zone->zone_dev_attrs[count++] =
51649 &dev_attr_max_energy_range_uj.attr;
51650 if (power_zone->ops->get_energy_uj) {
51651+ pax_open_kernel();
51652 if (power_zone->ops->reset_energy_uj)
51653- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51654+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51655 else
51656- dev_attr_energy_uj.attr.mode = S_IRUGO;
51657+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51658+ pax_close_kernel();
51659 power_zone->zone_dev_attrs[count++] =
51660 &dev_attr_energy_uj.attr;
51661 }
51662diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51663index 9c5d414..c7900ce 100644
51664--- a/drivers/ptp/ptp_private.h
51665+++ b/drivers/ptp/ptp_private.h
51666@@ -51,7 +51,7 @@ struct ptp_clock {
51667 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51668 wait_queue_head_t tsev_wq;
51669 int defunct; /* tells readers to go away when clock is being removed */
51670- struct device_attribute *pin_dev_attr;
51671+ device_attribute_no_const *pin_dev_attr;
51672 struct attribute **pin_attr;
51673 struct attribute_group pin_attr_group;
51674 };
51675diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51676index 302e626..12579af 100644
51677--- a/drivers/ptp/ptp_sysfs.c
51678+++ b/drivers/ptp/ptp_sysfs.c
51679@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51680 goto no_pin_attr;
51681
51682 for (i = 0; i < n_pins; i++) {
51683- struct device_attribute *da = &ptp->pin_dev_attr[i];
51684+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51685 sysfs_attr_init(&da->attr);
51686 da->attr.name = info->pin_config[i].name;
51687 da->attr.mode = 0644;
51688diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51689index a3c3785..c901e3a 100644
51690--- a/drivers/regulator/core.c
51691+++ b/drivers/regulator/core.c
51692@@ -3481,7 +3481,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51693 {
51694 const struct regulation_constraints *constraints = NULL;
51695 const struct regulator_init_data *init_data;
51696- static atomic_t regulator_no = ATOMIC_INIT(0);
51697+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
51698 struct regulator_dev *rdev;
51699 struct device *dev;
51700 int ret, i;
51701@@ -3551,7 +3551,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51702 rdev->dev.of_node = of_node_get(config->of_node);
51703 rdev->dev.parent = dev;
51704 dev_set_name(&rdev->dev, "regulator.%d",
51705- atomic_inc_return(&regulator_no) - 1);
51706+ atomic_inc_return_unchecked(&regulator_no) - 1);
51707 ret = device_register(&rdev->dev);
51708 if (ret != 0) {
51709 put_device(&rdev->dev);
51710diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51711index 2fc4111..6aa88ca 100644
51712--- a/drivers/regulator/max8660.c
51713+++ b/drivers/regulator/max8660.c
51714@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51715 max8660->shadow_regs[MAX8660_OVER1] = 5;
51716 } else {
51717 /* Otherwise devices can be toggled via software */
51718- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51719- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51720+ pax_open_kernel();
51721+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51722+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51723+ pax_close_kernel();
51724 }
51725
51726 /*
51727diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51728index dbedf17..18ff6b7 100644
51729--- a/drivers/regulator/max8973-regulator.c
51730+++ b/drivers/regulator/max8973-regulator.c
51731@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51732 if (!pdata || !pdata->enable_ext_control) {
51733 max->desc.enable_reg = MAX8973_VOUT;
51734 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51735- max->ops.enable = regulator_enable_regmap;
51736- max->ops.disable = regulator_disable_regmap;
51737- max->ops.is_enabled = regulator_is_enabled_regmap;
51738+ pax_open_kernel();
51739+ *(void **)&max->ops.enable = regulator_enable_regmap;
51740+ *(void **)&max->ops.disable = regulator_disable_regmap;
51741+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51742+ pax_close_kernel();
51743 }
51744
51745 if (pdata) {
51746diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51747index f374fa5..26f0683 100644
51748--- a/drivers/regulator/mc13892-regulator.c
51749+++ b/drivers/regulator/mc13892-regulator.c
51750@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51751 }
51752 mc13xxx_unlock(mc13892);
51753
51754- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51755+ pax_open_kernel();
51756+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51757 = mc13892_vcam_set_mode;
51758- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51759+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51760 = mc13892_vcam_get_mode;
51761+ pax_close_kernel();
51762
51763 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51764 ARRAY_SIZE(mc13892_regulators));
51765diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51766index 5b2e761..c8c8a4a 100644
51767--- a/drivers/rtc/rtc-cmos.c
51768+++ b/drivers/rtc/rtc-cmos.c
51769@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51770 hpet_rtc_timer_init();
51771
51772 /* export at least the first block of NVRAM */
51773- nvram.size = address_space - NVRAM_OFFSET;
51774+ pax_open_kernel();
51775+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51776+ pax_close_kernel();
51777 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51778 if (retval < 0) {
51779 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51780diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51781index d049393..bb20be0 100644
51782--- a/drivers/rtc/rtc-dev.c
51783+++ b/drivers/rtc/rtc-dev.c
51784@@ -16,6 +16,7 @@
51785 #include <linux/module.h>
51786 #include <linux/rtc.h>
51787 #include <linux/sched.h>
51788+#include <linux/grsecurity.h>
51789 #include "rtc-core.h"
51790
51791 static dev_t rtc_devt;
51792@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51793 if (copy_from_user(&tm, uarg, sizeof(tm)))
51794 return -EFAULT;
51795
51796+ gr_log_timechange();
51797+
51798 return rtc_set_time(rtc, &tm);
51799
51800 case RTC_PIE_ON:
51801diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51802index f03d5ba..8325bf6 100644
51803--- a/drivers/rtc/rtc-ds1307.c
51804+++ b/drivers/rtc/rtc-ds1307.c
51805@@ -107,7 +107,7 @@ struct ds1307 {
51806 u8 offset; /* register's offset */
51807 u8 regs[11];
51808 u16 nvram_offset;
51809- struct bin_attribute *nvram;
51810+ bin_attribute_no_const *nvram;
51811 enum ds_type type;
51812 unsigned long flags;
51813 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51814diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51815index 11880c1..b823aa4 100644
51816--- a/drivers/rtc/rtc-m48t59.c
51817+++ b/drivers/rtc/rtc-m48t59.c
51818@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51819 if (IS_ERR(m48t59->rtc))
51820 return PTR_ERR(m48t59->rtc);
51821
51822- m48t59_nvram_attr.size = pdata->offset;
51823+ pax_open_kernel();
51824+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51825+ pax_close_kernel();
51826
51827 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51828 if (ret)
51829diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51830index e693af6..2e525b6 100644
51831--- a/drivers/scsi/bfa/bfa_fcpim.h
51832+++ b/drivers/scsi/bfa/bfa_fcpim.h
51833@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51834
51835 struct bfa_itn_s {
51836 bfa_isr_func_t isr;
51837-};
51838+} __no_const;
51839
51840 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51841 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51842diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51843index 0f19455..ef7adb5 100644
51844--- a/drivers/scsi/bfa/bfa_fcs.c
51845+++ b/drivers/scsi/bfa/bfa_fcs.c
51846@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51847 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51848
51849 static struct bfa_fcs_mod_s fcs_modules[] = {
51850- { bfa_fcs_port_attach, NULL, NULL },
51851- { bfa_fcs_uf_attach, NULL, NULL },
51852- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51853- bfa_fcs_fabric_modexit },
51854+ {
51855+ .attach = bfa_fcs_port_attach,
51856+ .modinit = NULL,
51857+ .modexit = NULL
51858+ },
51859+ {
51860+ .attach = bfa_fcs_uf_attach,
51861+ .modinit = NULL,
51862+ .modexit = NULL
51863+ },
51864+ {
51865+ .attach = bfa_fcs_fabric_attach,
51866+ .modinit = bfa_fcs_fabric_modinit,
51867+ .modexit = bfa_fcs_fabric_modexit
51868+ },
51869 };
51870
51871 /*
51872diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51873index ff75ef8..2dfe00a 100644
51874--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51875+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51876@@ -89,15 +89,26 @@ static struct {
51877 void (*offline) (struct bfa_fcs_lport_s *port);
51878 } __port_action[] = {
51879 {
51880- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51881- bfa_fcs_lport_unknown_offline}, {
51882- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51883- bfa_fcs_lport_fab_offline}, {
51884- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51885- bfa_fcs_lport_n2n_offline}, {
51886- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51887- bfa_fcs_lport_loop_offline},
51888- };
51889+ .init = bfa_fcs_lport_unknown_init,
51890+ .online = bfa_fcs_lport_unknown_online,
51891+ .offline = bfa_fcs_lport_unknown_offline
51892+ },
51893+ {
51894+ .init = bfa_fcs_lport_fab_init,
51895+ .online = bfa_fcs_lport_fab_online,
51896+ .offline = bfa_fcs_lport_fab_offline
51897+ },
51898+ {
51899+ .init = bfa_fcs_lport_n2n_init,
51900+ .online = bfa_fcs_lport_n2n_online,
51901+ .offline = bfa_fcs_lport_n2n_offline
51902+ },
51903+ {
51904+ .init = bfa_fcs_lport_loop_init,
51905+ .online = bfa_fcs_lport_loop_online,
51906+ .offline = bfa_fcs_lport_loop_offline
51907+ },
51908+};
51909
51910 /*
51911 * fcs_port_sm FCS logical port state machine
51912diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51913index a38aafa0..fe8f03b 100644
51914--- a/drivers/scsi/bfa/bfa_ioc.h
51915+++ b/drivers/scsi/bfa/bfa_ioc.h
51916@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51917 bfa_ioc_disable_cbfn_t disable_cbfn;
51918 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51919 bfa_ioc_reset_cbfn_t reset_cbfn;
51920-};
51921+} __no_const;
51922
51923 /*
51924 * IOC event notification mechanism.
51925@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51926 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51927 enum bfi_ioc_state fwstate);
51928 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51929-};
51930+} __no_const;
51931
51932 /*
51933 * Queue element to wait for room in request queue. FIFO order is
51934diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51935index a14c784..6de6790 100644
51936--- a/drivers/scsi/bfa/bfa_modules.h
51937+++ b/drivers/scsi/bfa/bfa_modules.h
51938@@ -78,12 +78,12 @@ enum {
51939 \
51940 extern struct bfa_module_s hal_mod_ ## __mod; \
51941 struct bfa_module_s hal_mod_ ## __mod = { \
51942- bfa_ ## __mod ## _meminfo, \
51943- bfa_ ## __mod ## _attach, \
51944- bfa_ ## __mod ## _detach, \
51945- bfa_ ## __mod ## _start, \
51946- bfa_ ## __mod ## _stop, \
51947- bfa_ ## __mod ## _iocdisable, \
51948+ .meminfo = bfa_ ## __mod ## _meminfo, \
51949+ .attach = bfa_ ## __mod ## _attach, \
51950+ .detach = bfa_ ## __mod ## _detach, \
51951+ .start = bfa_ ## __mod ## _start, \
51952+ .stop = bfa_ ## __mod ## _stop, \
51953+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51954 }
51955
51956 #define BFA_CACHELINE_SZ (256)
51957diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51958index 045c4e1..13de803 100644
51959--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51960+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51961@@ -33,8 +33,8 @@
51962 */
51963 #include "libfcoe.h"
51964
51965-static atomic_t ctlr_num;
51966-static atomic_t fcf_num;
51967+static atomic_unchecked_t ctlr_num;
51968+static atomic_unchecked_t fcf_num;
51969
51970 /*
51971 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51972@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51973 if (!ctlr)
51974 goto out;
51975
51976- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51977+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51978 ctlr->f = f;
51979 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51980 INIT_LIST_HEAD(&ctlr->fcfs);
51981@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51982 fcf->dev.parent = &ctlr->dev;
51983 fcf->dev.bus = &fcoe_bus_type;
51984 fcf->dev.type = &fcoe_fcf_device_type;
51985- fcf->id = atomic_inc_return(&fcf_num) - 1;
51986+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51987 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51988
51989 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51990@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51991 {
51992 int error;
51993
51994- atomic_set(&ctlr_num, 0);
51995- atomic_set(&fcf_num, 0);
51996+ atomic_set_unchecked(&ctlr_num, 0);
51997+ atomic_set_unchecked(&fcf_num, 0);
51998
51999 error = bus_register(&fcoe_bus_type);
52000 if (error)
52001diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52002index 6de80e3..a11e0ac 100644
52003--- a/drivers/scsi/hosts.c
52004+++ b/drivers/scsi/hosts.c
52005@@ -42,7 +42,7 @@
52006 #include "scsi_logging.h"
52007
52008
52009-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52010+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52011
52012
52013 static void scsi_host_cls_release(struct device *dev)
52014@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52015 * subtract one because we increment first then return, but we need to
52016 * know what the next host number was before increment
52017 */
52018- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52019+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52020 shost->dma_channel = 0xff;
52021
52022 /* These three are default values which can be overridden */
52023diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52024index 6b35d0d..2880305 100644
52025--- a/drivers/scsi/hpsa.c
52026+++ b/drivers/scsi/hpsa.c
52027@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52028 unsigned long flags;
52029
52030 if (h->transMethod & CFGTBL_Trans_io_accel1)
52031- return h->access.command_completed(h, q);
52032+ return h->access->command_completed(h, q);
52033
52034 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52035- return h->access.command_completed(h, q);
52036+ return h->access->command_completed(h, q);
52037
52038 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52039 a = rq->head[rq->current_entry];
52040@@ -5454,7 +5454,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52041 while (!list_empty(&h->reqQ)) {
52042 c = list_entry(h->reqQ.next, struct CommandList, list);
52043 /* can't do anything if fifo is full */
52044- if ((h->access.fifo_full(h))) {
52045+ if ((h->access->fifo_full(h))) {
52046 h->fifo_recently_full = 1;
52047 dev_warn(&h->pdev->dev, "fifo full\n");
52048 break;
52049@@ -5476,7 +5476,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52050
52051 /* Tell the controller execute command */
52052 spin_unlock_irqrestore(&h->lock, *flags);
52053- h->access.submit_command(h, c);
52054+ h->access->submit_command(h, c);
52055 spin_lock_irqsave(&h->lock, *flags);
52056 }
52057 }
52058@@ -5492,17 +5492,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52059
52060 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52061 {
52062- return h->access.command_completed(h, q);
52063+ return h->access->command_completed(h, q);
52064 }
52065
52066 static inline bool interrupt_pending(struct ctlr_info *h)
52067 {
52068- return h->access.intr_pending(h);
52069+ return h->access->intr_pending(h);
52070 }
52071
52072 static inline long interrupt_not_for_us(struct ctlr_info *h)
52073 {
52074- return (h->access.intr_pending(h) == 0) ||
52075+ return (h->access->intr_pending(h) == 0) ||
52076 (h->interrupts_enabled == 0);
52077 }
52078
52079@@ -6458,7 +6458,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52080 if (prod_index < 0)
52081 return -ENODEV;
52082 h->product_name = products[prod_index].product_name;
52083- h->access = *(products[prod_index].access);
52084+ h->access = products[prod_index].access;
52085
52086 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52087 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52088@@ -6780,7 +6780,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52089 unsigned long flags;
52090 u32 lockup_detected;
52091
52092- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52093+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52094 spin_lock_irqsave(&h->lock, flags);
52095 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52096 if (!lockup_detected) {
52097@@ -7027,7 +7027,7 @@ reinit_after_soft_reset:
52098 }
52099
52100 /* make sure the board interrupts are off */
52101- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52102+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52103
52104 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52105 goto clean2;
52106@@ -7062,7 +7062,7 @@ reinit_after_soft_reset:
52107 * fake ones to scoop up any residual completions.
52108 */
52109 spin_lock_irqsave(&h->lock, flags);
52110- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52111+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52112 spin_unlock_irqrestore(&h->lock, flags);
52113 free_irqs(h);
52114 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52115@@ -7081,9 +7081,9 @@ reinit_after_soft_reset:
52116 dev_info(&h->pdev->dev, "Board READY.\n");
52117 dev_info(&h->pdev->dev,
52118 "Waiting for stale completions to drain.\n");
52119- h->access.set_intr_mask(h, HPSA_INTR_ON);
52120+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52121 msleep(10000);
52122- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52123+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52124
52125 rc = controller_reset_failed(h->cfgtable);
52126 if (rc)
52127@@ -7109,7 +7109,7 @@ reinit_after_soft_reset:
52128 h->drv_req_rescan = 0;
52129
52130 /* Turn the interrupts on so we can service requests */
52131- h->access.set_intr_mask(h, HPSA_INTR_ON);
52132+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52133
52134 hpsa_hba_inquiry(h);
52135 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52136@@ -7174,7 +7174,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52137 * To write all data in the battery backed cache to disks
52138 */
52139 hpsa_flush_cache(h);
52140- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52141+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52142 hpsa_free_irqs_and_disable_msix(h);
52143 }
52144
52145@@ -7292,7 +7292,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52146 CFGTBL_Trans_enable_directed_msix |
52147 (trans_support & (CFGTBL_Trans_io_accel1 |
52148 CFGTBL_Trans_io_accel2));
52149- struct access_method access = SA5_performant_access;
52150+ struct access_method *access = &SA5_performant_access;
52151
52152 /* This is a bit complicated. There are 8 registers on
52153 * the controller which we write to to tell it 8 different
52154@@ -7334,7 +7334,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52155 * perform the superfluous readl() after each command submission.
52156 */
52157 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52158- access = SA5_performant_access_no_read;
52159+ access = &SA5_performant_access_no_read;
52160
52161 /* Controller spec: zero out this buffer. */
52162 for (i = 0; i < h->nreply_queues; i++)
52163@@ -7364,12 +7364,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52164 * enable outbound interrupt coalescing in accelerator mode;
52165 */
52166 if (trans_support & CFGTBL_Trans_io_accel1) {
52167- access = SA5_ioaccel_mode1_access;
52168+ access = &SA5_ioaccel_mode1_access;
52169 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52170 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52171 } else {
52172 if (trans_support & CFGTBL_Trans_io_accel2) {
52173- access = SA5_ioaccel_mode2_access;
52174+ access = &SA5_ioaccel_mode2_access;
52175 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52176 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52177 }
52178diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52179index 24472ce..8782caf 100644
52180--- a/drivers/scsi/hpsa.h
52181+++ b/drivers/scsi/hpsa.h
52182@@ -127,7 +127,7 @@ struct ctlr_info {
52183 unsigned int msix_vector;
52184 unsigned int msi_vector;
52185 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52186- struct access_method access;
52187+ struct access_method *access;
52188 char hba_mode_enabled;
52189
52190 /* queue and queue Info */
52191@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52192 }
52193
52194 static struct access_method SA5_access = {
52195- SA5_submit_command,
52196- SA5_intr_mask,
52197- SA5_fifo_full,
52198- SA5_intr_pending,
52199- SA5_completed,
52200+ .submit_command = SA5_submit_command,
52201+ .set_intr_mask = SA5_intr_mask,
52202+ .fifo_full = SA5_fifo_full,
52203+ .intr_pending = SA5_intr_pending,
52204+ .command_completed = SA5_completed,
52205 };
52206
52207 static struct access_method SA5_ioaccel_mode1_access = {
52208- SA5_submit_command,
52209- SA5_performant_intr_mask,
52210- SA5_fifo_full,
52211- SA5_ioaccel_mode1_intr_pending,
52212- SA5_ioaccel_mode1_completed,
52213+ .submit_command = SA5_submit_command,
52214+ .set_intr_mask = SA5_performant_intr_mask,
52215+ .fifo_full = SA5_fifo_full,
52216+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52217+ .command_completed = SA5_ioaccel_mode1_completed,
52218 };
52219
52220 static struct access_method SA5_ioaccel_mode2_access = {
52221- SA5_submit_command_ioaccel2,
52222- SA5_performant_intr_mask,
52223- SA5_fifo_full,
52224- SA5_performant_intr_pending,
52225- SA5_performant_completed,
52226+ .submit_command = SA5_submit_command_ioaccel2,
52227+ .set_intr_mask = SA5_performant_intr_mask,
52228+ .fifo_full = SA5_fifo_full,
52229+ .intr_pending = SA5_performant_intr_pending,
52230+ .command_completed = SA5_performant_completed,
52231 };
52232
52233 static struct access_method SA5_performant_access = {
52234- SA5_submit_command,
52235- SA5_performant_intr_mask,
52236- SA5_fifo_full,
52237- SA5_performant_intr_pending,
52238- SA5_performant_completed,
52239+ .submit_command = SA5_submit_command,
52240+ .set_intr_mask = SA5_performant_intr_mask,
52241+ .fifo_full = SA5_fifo_full,
52242+ .intr_pending = SA5_performant_intr_pending,
52243+ .command_completed = SA5_performant_completed,
52244 };
52245
52246 static struct access_method SA5_performant_access_no_read = {
52247- SA5_submit_command_no_read,
52248- SA5_performant_intr_mask,
52249- SA5_fifo_full,
52250- SA5_performant_intr_pending,
52251- SA5_performant_completed,
52252+ .submit_command = SA5_submit_command_no_read,
52253+ .set_intr_mask = SA5_performant_intr_mask,
52254+ .fifo_full = SA5_fifo_full,
52255+ .intr_pending = SA5_performant_intr_pending,
52256+ .command_completed = SA5_performant_completed,
52257 };
52258
52259 struct board_type {
52260diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52261index 1b3a094..068e683 100644
52262--- a/drivers/scsi/libfc/fc_exch.c
52263+++ b/drivers/scsi/libfc/fc_exch.c
52264@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52265 u16 pool_max_index;
52266
52267 struct {
52268- atomic_t no_free_exch;
52269- atomic_t no_free_exch_xid;
52270- atomic_t xid_not_found;
52271- atomic_t xid_busy;
52272- atomic_t seq_not_found;
52273- atomic_t non_bls_resp;
52274+ atomic_unchecked_t no_free_exch;
52275+ atomic_unchecked_t no_free_exch_xid;
52276+ atomic_unchecked_t xid_not_found;
52277+ atomic_unchecked_t xid_busy;
52278+ atomic_unchecked_t seq_not_found;
52279+ atomic_unchecked_t non_bls_resp;
52280 } stats;
52281 };
52282
52283@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52284 /* allocate memory for exchange */
52285 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52286 if (!ep) {
52287- atomic_inc(&mp->stats.no_free_exch);
52288+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52289 goto out;
52290 }
52291 memset(ep, 0, sizeof(*ep));
52292@@ -874,7 +874,7 @@ out:
52293 return ep;
52294 err:
52295 spin_unlock_bh(&pool->lock);
52296- atomic_inc(&mp->stats.no_free_exch_xid);
52297+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52298 mempool_free(ep, mp->ep_pool);
52299 return NULL;
52300 }
52301@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52302 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52303 ep = fc_exch_find(mp, xid);
52304 if (!ep) {
52305- atomic_inc(&mp->stats.xid_not_found);
52306+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52307 reject = FC_RJT_OX_ID;
52308 goto out;
52309 }
52310@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52311 ep = fc_exch_find(mp, xid);
52312 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52313 if (ep) {
52314- atomic_inc(&mp->stats.xid_busy);
52315+ atomic_inc_unchecked(&mp->stats.xid_busy);
52316 reject = FC_RJT_RX_ID;
52317 goto rel;
52318 }
52319@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52320 }
52321 xid = ep->xid; /* get our XID */
52322 } else if (!ep) {
52323- atomic_inc(&mp->stats.xid_not_found);
52324+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52325 reject = FC_RJT_RX_ID; /* XID not found */
52326 goto out;
52327 }
52328@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52329 } else {
52330 sp = &ep->seq;
52331 if (sp->id != fh->fh_seq_id) {
52332- atomic_inc(&mp->stats.seq_not_found);
52333+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52334 if (f_ctl & FC_FC_END_SEQ) {
52335 /*
52336 * Update sequence_id based on incoming last
52337@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52338
52339 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52340 if (!ep) {
52341- atomic_inc(&mp->stats.xid_not_found);
52342+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52343 goto out;
52344 }
52345 if (ep->esb_stat & ESB_ST_COMPLETE) {
52346- atomic_inc(&mp->stats.xid_not_found);
52347+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52348 goto rel;
52349 }
52350 if (ep->rxid == FC_XID_UNKNOWN)
52351 ep->rxid = ntohs(fh->fh_rx_id);
52352 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52353- atomic_inc(&mp->stats.xid_not_found);
52354+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52355 goto rel;
52356 }
52357 if (ep->did != ntoh24(fh->fh_s_id) &&
52358 ep->did != FC_FID_FLOGI) {
52359- atomic_inc(&mp->stats.xid_not_found);
52360+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52361 goto rel;
52362 }
52363 sof = fr_sof(fp);
52364@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52365 sp->ssb_stat |= SSB_ST_RESP;
52366 sp->id = fh->fh_seq_id;
52367 } else if (sp->id != fh->fh_seq_id) {
52368- atomic_inc(&mp->stats.seq_not_found);
52369+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52370 goto rel;
52371 }
52372
52373@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52374 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52375
52376 if (!sp)
52377- atomic_inc(&mp->stats.xid_not_found);
52378+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52379 else
52380- atomic_inc(&mp->stats.non_bls_resp);
52381+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52382
52383 fc_frame_free(fp);
52384 }
52385@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52386
52387 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52388 mp = ema->mp;
52389- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52390+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52391 st->fc_no_free_exch_xid +=
52392- atomic_read(&mp->stats.no_free_exch_xid);
52393- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52394- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52395- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52396- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52397+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52398+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52399+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52400+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52401+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52402 }
52403 }
52404 EXPORT_SYMBOL(fc_exch_update_stats);
52405diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52406index 766098a..1c6c971 100644
52407--- a/drivers/scsi/libsas/sas_ata.c
52408+++ b/drivers/scsi/libsas/sas_ata.c
52409@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52410 .postreset = ata_std_postreset,
52411 .error_handler = ata_std_error_handler,
52412 .post_internal_cmd = sas_ata_post_internal,
52413- .qc_defer = ata_std_qc_defer,
52414+ .qc_defer = ata_std_qc_defer,
52415 .qc_prep = ata_noop_qc_prep,
52416 .qc_issue = sas_ata_qc_issue,
52417 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52418diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52419index 434e903..5a4a79b 100644
52420--- a/drivers/scsi/lpfc/lpfc.h
52421+++ b/drivers/scsi/lpfc/lpfc.h
52422@@ -430,7 +430,7 @@ struct lpfc_vport {
52423 struct dentry *debug_nodelist;
52424 struct dentry *vport_debugfs_root;
52425 struct lpfc_debugfs_trc *disc_trc;
52426- atomic_t disc_trc_cnt;
52427+ atomic_unchecked_t disc_trc_cnt;
52428 #endif
52429 uint8_t stat_data_enabled;
52430 uint8_t stat_data_blocked;
52431@@ -880,8 +880,8 @@ struct lpfc_hba {
52432 struct timer_list fabric_block_timer;
52433 unsigned long bit_flags;
52434 #define FABRIC_COMANDS_BLOCKED 0
52435- atomic_t num_rsrc_err;
52436- atomic_t num_cmd_success;
52437+ atomic_unchecked_t num_rsrc_err;
52438+ atomic_unchecked_t num_cmd_success;
52439 unsigned long last_rsrc_error_time;
52440 unsigned long last_ramp_down_time;
52441 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52442@@ -916,7 +916,7 @@ struct lpfc_hba {
52443
52444 struct dentry *debug_slow_ring_trc;
52445 struct lpfc_debugfs_trc *slow_ring_trc;
52446- atomic_t slow_ring_trc_cnt;
52447+ atomic_unchecked_t slow_ring_trc_cnt;
52448 /* iDiag debugfs sub-directory */
52449 struct dentry *idiag_root;
52450 struct dentry *idiag_pci_cfg;
52451diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52452index b0aedce..89c6ca6 100644
52453--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52454+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52455@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52456
52457 #include <linux/debugfs.h>
52458
52459-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52460+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52461 static unsigned long lpfc_debugfs_start_time = 0L;
52462
52463 /* iDiag */
52464@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52465 lpfc_debugfs_enable = 0;
52466
52467 len = 0;
52468- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52469+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52470 (lpfc_debugfs_max_disc_trc - 1);
52471 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52472 dtp = vport->disc_trc + i;
52473@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52474 lpfc_debugfs_enable = 0;
52475
52476 len = 0;
52477- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52478+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52479 (lpfc_debugfs_max_slow_ring_trc - 1);
52480 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52481 dtp = phba->slow_ring_trc + i;
52482@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52483 !vport || !vport->disc_trc)
52484 return;
52485
52486- index = atomic_inc_return(&vport->disc_trc_cnt) &
52487+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52488 (lpfc_debugfs_max_disc_trc - 1);
52489 dtp = vport->disc_trc + index;
52490 dtp->fmt = fmt;
52491 dtp->data1 = data1;
52492 dtp->data2 = data2;
52493 dtp->data3 = data3;
52494- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52495+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52496 dtp->jif = jiffies;
52497 #endif
52498 return;
52499@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52500 !phba || !phba->slow_ring_trc)
52501 return;
52502
52503- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52504+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52505 (lpfc_debugfs_max_slow_ring_trc - 1);
52506 dtp = phba->slow_ring_trc + index;
52507 dtp->fmt = fmt;
52508 dtp->data1 = data1;
52509 dtp->data2 = data2;
52510 dtp->data3 = data3;
52511- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52512+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52513 dtp->jif = jiffies;
52514 #endif
52515 return;
52516@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52517 "slow_ring buffer\n");
52518 goto debug_failed;
52519 }
52520- atomic_set(&phba->slow_ring_trc_cnt, 0);
52521+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52522 memset(phba->slow_ring_trc, 0,
52523 (sizeof(struct lpfc_debugfs_trc) *
52524 lpfc_debugfs_max_slow_ring_trc));
52525@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52526 "buffer\n");
52527 goto debug_failed;
52528 }
52529- atomic_set(&vport->disc_trc_cnt, 0);
52530+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52531
52532 snprintf(name, sizeof(name), "discovery_trace");
52533 vport->debug_disc_trc =
52534diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52535index a5769a9..718ecc7 100644
52536--- a/drivers/scsi/lpfc/lpfc_init.c
52537+++ b/drivers/scsi/lpfc/lpfc_init.c
52538@@ -11299,8 +11299,10 @@ lpfc_init(void)
52539 "misc_register returned with status %d", error);
52540
52541 if (lpfc_enable_npiv) {
52542- lpfc_transport_functions.vport_create = lpfc_vport_create;
52543- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52544+ pax_open_kernel();
52545+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52546+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52547+ pax_close_kernel();
52548 }
52549 lpfc_transport_template =
52550 fc_attach_transport(&lpfc_transport_functions);
52551diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52552index 7862c55..5aa65df 100644
52553--- a/drivers/scsi/lpfc/lpfc_scsi.c
52554+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52555@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52556 uint32_t evt_posted;
52557
52558 spin_lock_irqsave(&phba->hbalock, flags);
52559- atomic_inc(&phba->num_rsrc_err);
52560+ atomic_inc_unchecked(&phba->num_rsrc_err);
52561 phba->last_rsrc_error_time = jiffies;
52562
52563 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52564@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52565 unsigned long num_rsrc_err, num_cmd_success;
52566 int i;
52567
52568- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52569- num_cmd_success = atomic_read(&phba->num_cmd_success);
52570+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52571+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52572
52573 /*
52574 * The error and success command counters are global per
52575@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52576 }
52577 }
52578 lpfc_destroy_vport_work_array(phba, vports);
52579- atomic_set(&phba->num_rsrc_err, 0);
52580- atomic_set(&phba->num_cmd_success, 0);
52581+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52582+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52583 }
52584
52585 /**
52586diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52587index dd46101..ca80eb9 100644
52588--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52589+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52590@@ -1559,7 +1559,7 @@ _scsih_get_resync(struct device *dev)
52591 {
52592 struct scsi_device *sdev = to_scsi_device(dev);
52593 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52594- static struct _raid_device *raid_device;
52595+ struct _raid_device *raid_device;
52596 unsigned long flags;
52597 Mpi2RaidVolPage0_t vol_pg0;
52598 Mpi2ConfigReply_t mpi_reply;
52599@@ -1611,7 +1611,7 @@ _scsih_get_state(struct device *dev)
52600 {
52601 struct scsi_device *sdev = to_scsi_device(dev);
52602 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52603- static struct _raid_device *raid_device;
52604+ struct _raid_device *raid_device;
52605 unsigned long flags;
52606 Mpi2RaidVolPage0_t vol_pg0;
52607 Mpi2ConfigReply_t mpi_reply;
52608@@ -6648,7 +6648,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52609 Mpi2EventDataIrOperationStatus_t *event_data =
52610 (Mpi2EventDataIrOperationStatus_t *)
52611 fw_event->event_data;
52612- static struct _raid_device *raid_device;
52613+ struct _raid_device *raid_device;
52614 unsigned long flags;
52615 u16 handle;
52616
52617@@ -7119,7 +7119,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52618 u64 sas_address;
52619 struct _sas_device *sas_device;
52620 struct _sas_node *expander_device;
52621- static struct _raid_device *raid_device;
52622+ struct _raid_device *raid_device;
52623 u8 retry_count;
52624 unsigned long flags;
52625
52626diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52627index 6f3275d..fa5e6b6 100644
52628--- a/drivers/scsi/pmcraid.c
52629+++ b/drivers/scsi/pmcraid.c
52630@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52631 res->scsi_dev = scsi_dev;
52632 scsi_dev->hostdata = res;
52633 res->change_detected = 0;
52634- atomic_set(&res->read_failures, 0);
52635- atomic_set(&res->write_failures, 0);
52636+ atomic_set_unchecked(&res->read_failures, 0);
52637+ atomic_set_unchecked(&res->write_failures, 0);
52638 rc = 0;
52639 }
52640 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52641@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52642
52643 /* If this was a SCSI read/write command keep count of errors */
52644 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52645- atomic_inc(&res->read_failures);
52646+ atomic_inc_unchecked(&res->read_failures);
52647 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52648- atomic_inc(&res->write_failures);
52649+ atomic_inc_unchecked(&res->write_failures);
52650
52651 if (!RES_IS_GSCSI(res->cfg_entry) &&
52652 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52653@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
52654 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52655 * hrrq_id assigned here in queuecommand
52656 */
52657- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52658+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52659 pinstance->num_hrrq;
52660 cmd->cmd_done = pmcraid_io_done;
52661
52662@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
52663 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52664 * hrrq_id assigned here in queuecommand
52665 */
52666- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52667+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52668 pinstance->num_hrrq;
52669
52670 if (request_size) {
52671@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52672
52673 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52674 /* add resources only after host is added into system */
52675- if (!atomic_read(&pinstance->expose_resources))
52676+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52677 return;
52678
52679 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52680@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52681 init_waitqueue_head(&pinstance->reset_wait_q);
52682
52683 atomic_set(&pinstance->outstanding_cmds, 0);
52684- atomic_set(&pinstance->last_message_id, 0);
52685- atomic_set(&pinstance->expose_resources, 0);
52686+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52687+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52688
52689 INIT_LIST_HEAD(&pinstance->free_res_q);
52690 INIT_LIST_HEAD(&pinstance->used_res_q);
52691@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52692 /* Schedule worker thread to handle CCN and take care of adding and
52693 * removing devices to OS
52694 */
52695- atomic_set(&pinstance->expose_resources, 1);
52696+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52697 schedule_work(&pinstance->worker_q);
52698 return rc;
52699
52700diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52701index e1d150f..6c6df44 100644
52702--- a/drivers/scsi/pmcraid.h
52703+++ b/drivers/scsi/pmcraid.h
52704@@ -748,7 +748,7 @@ struct pmcraid_instance {
52705 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52706
52707 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52708- atomic_t last_message_id;
52709+ atomic_unchecked_t last_message_id;
52710
52711 /* configuration table */
52712 struct pmcraid_config_table *cfg_table;
52713@@ -777,7 +777,7 @@ struct pmcraid_instance {
52714 atomic_t outstanding_cmds;
52715
52716 /* should add/delete resources to mid-layer now ?*/
52717- atomic_t expose_resources;
52718+ atomic_unchecked_t expose_resources;
52719
52720
52721
52722@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52723 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52724 };
52725 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52726- atomic_t read_failures; /* count of failed READ commands */
52727- atomic_t write_failures; /* count of failed WRITE commands */
52728+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52729+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52730
52731 /* To indicate add/delete/modify during CCN */
52732 u8 change_detected;
52733diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52734index 16fe519..3b1ec82 100644
52735--- a/drivers/scsi/qla2xxx/qla_attr.c
52736+++ b/drivers/scsi/qla2xxx/qla_attr.c
52737@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52738 return 0;
52739 }
52740
52741-struct fc_function_template qla2xxx_transport_functions = {
52742+fc_function_template_no_const qla2xxx_transport_functions = {
52743
52744 .show_host_node_name = 1,
52745 .show_host_port_name = 1,
52746@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52747 .bsg_timeout = qla24xx_bsg_timeout,
52748 };
52749
52750-struct fc_function_template qla2xxx_transport_vport_functions = {
52751+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52752
52753 .show_host_node_name = 1,
52754 .show_host_port_name = 1,
52755diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52756index d646540..5b13554 100644
52757--- a/drivers/scsi/qla2xxx/qla_gbl.h
52758+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52759@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
52760 struct device_attribute;
52761 extern struct device_attribute *qla2x00_host_attrs[];
52762 struct fc_function_template;
52763-extern struct fc_function_template qla2xxx_transport_functions;
52764-extern struct fc_function_template qla2xxx_transport_vport_functions;
52765+extern fc_function_template_no_const qla2xxx_transport_functions;
52766+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52767 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52768 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52769 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52770diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52771index 8252c0e..613adad 100644
52772--- a/drivers/scsi/qla2xxx/qla_os.c
52773+++ b/drivers/scsi/qla2xxx/qla_os.c
52774@@ -1493,8 +1493,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52775 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52776 /* Ok, a 64bit DMA mask is applicable. */
52777 ha->flags.enable_64bit_addressing = 1;
52778- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52779- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52780+ pax_open_kernel();
52781+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52782+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52783+ pax_close_kernel();
52784 return;
52785 }
52786 }
52787diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52788index 8f6d0fb..1b21097 100644
52789--- a/drivers/scsi/qla4xxx/ql4_def.h
52790+++ b/drivers/scsi/qla4xxx/ql4_def.h
52791@@ -305,7 +305,7 @@ struct ddb_entry {
52792 * (4000 only) */
52793 atomic_t relogin_timer; /* Max Time to wait for
52794 * relogin to complete */
52795- atomic_t relogin_retry_count; /* Num of times relogin has been
52796+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52797 * retried */
52798 uint32_t default_time2wait; /* Default Min time between
52799 * relogins (+aens) */
52800diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52801index 199fcf7..3c3a918 100644
52802--- a/drivers/scsi/qla4xxx/ql4_os.c
52803+++ b/drivers/scsi/qla4xxx/ql4_os.c
52804@@ -4496,12 +4496,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52805 */
52806 if (!iscsi_is_session_online(cls_sess)) {
52807 /* Reset retry relogin timer */
52808- atomic_inc(&ddb_entry->relogin_retry_count);
52809+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52810 DEBUG2(ql4_printk(KERN_INFO, ha,
52811 "%s: index[%d] relogin timed out-retrying"
52812 " relogin (%d), retry (%d)\n", __func__,
52813 ddb_entry->fw_ddb_index,
52814- atomic_read(&ddb_entry->relogin_retry_count),
52815+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52816 ddb_entry->default_time2wait + 4));
52817 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52818 atomic_set(&ddb_entry->retry_relogin_timer,
52819@@ -6609,7 +6609,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52820
52821 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52822 atomic_set(&ddb_entry->relogin_timer, 0);
52823- atomic_set(&ddb_entry->relogin_retry_count, 0);
52824+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52825 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52826 ddb_entry->default_relogin_timeout =
52827 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52828diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
52829index d81f3cc..0093e5b 100644
52830--- a/drivers/scsi/scsi.c
52831+++ b/drivers/scsi/scsi.c
52832@@ -645,7 +645,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52833 struct Scsi_Host *host = cmd->device->host;
52834 int rtn = 0;
52835
52836- atomic_inc(&cmd->device->iorequest_cnt);
52837+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52838
52839 /* check if the device is still usable */
52840 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52841diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52842index aaea4b9..c64408d 100644
52843--- a/drivers/scsi/scsi_lib.c
52844+++ b/drivers/scsi/scsi_lib.c
52845@@ -1581,7 +1581,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52846 shost = sdev->host;
52847 scsi_init_cmd_errh(cmd);
52848 cmd->result = DID_NO_CONNECT << 16;
52849- atomic_inc(&cmd->device->iorequest_cnt);
52850+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52851
52852 /*
52853 * SCSI request completion path will do scsi_device_unbusy(),
52854@@ -1604,9 +1604,9 @@ static void scsi_softirq_done(struct request *rq)
52855
52856 INIT_LIST_HEAD(&cmd->eh_entry);
52857
52858- atomic_inc(&cmd->device->iodone_cnt);
52859+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52860 if (cmd->result)
52861- atomic_inc(&cmd->device->ioerr_cnt);
52862+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52863
52864 disposition = scsi_decide_disposition(cmd);
52865 if (disposition != SUCCESS &&
52866diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52867index 8b4105a..1f58363 100644
52868--- a/drivers/scsi/scsi_sysfs.c
52869+++ b/drivers/scsi/scsi_sysfs.c
52870@@ -805,7 +805,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52871 char *buf) \
52872 { \
52873 struct scsi_device *sdev = to_scsi_device(dev); \
52874- unsigned long long count = atomic_read(&sdev->field); \
52875+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52876 return snprintf(buf, 20, "0x%llx\n", count); \
52877 } \
52878 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52879diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52880index 5d6f348..18778a6b 100644
52881--- a/drivers/scsi/scsi_transport_fc.c
52882+++ b/drivers/scsi/scsi_transport_fc.c
52883@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52884 * Netlink Infrastructure
52885 */
52886
52887-static atomic_t fc_event_seq;
52888+static atomic_unchecked_t fc_event_seq;
52889
52890 /**
52891 * fc_get_event_number - Obtain the next sequential FC event number
52892@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52893 u32
52894 fc_get_event_number(void)
52895 {
52896- return atomic_add_return(1, &fc_event_seq);
52897+ return atomic_add_return_unchecked(1, &fc_event_seq);
52898 }
52899 EXPORT_SYMBOL(fc_get_event_number);
52900
52901@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52902 {
52903 int error;
52904
52905- atomic_set(&fc_event_seq, 0);
52906+ atomic_set_unchecked(&fc_event_seq, 0);
52907
52908 error = transport_class_register(&fc_host_class);
52909 if (error)
52910@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52911 char *cp;
52912
52913 *val = simple_strtoul(buf, &cp, 0);
52914- if ((*cp && (*cp != '\n')) || (*val < 0))
52915+ if (*cp && (*cp != '\n'))
52916 return -EINVAL;
52917 /*
52918 * Check for overflow; dev_loss_tmo is u32
52919diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52920index 67d43e3..8cee73c 100644
52921--- a/drivers/scsi/scsi_transport_iscsi.c
52922+++ b/drivers/scsi/scsi_transport_iscsi.c
52923@@ -79,7 +79,7 @@ struct iscsi_internal {
52924 struct transport_container session_cont;
52925 };
52926
52927-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52928+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52929 static struct workqueue_struct *iscsi_eh_timer_workq;
52930
52931 static DEFINE_IDA(iscsi_sess_ida);
52932@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52933 int err;
52934
52935 ihost = shost->shost_data;
52936- session->sid = atomic_add_return(1, &iscsi_session_nr);
52937+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52938
52939 if (target_id == ISCSI_MAX_TARGET) {
52940 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52941@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52942 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52943 ISCSI_TRANSPORT_VERSION);
52944
52945- atomic_set(&iscsi_session_nr, 0);
52946+ atomic_set_unchecked(&iscsi_session_nr, 0);
52947
52948 err = class_register(&iscsi_transport_class);
52949 if (err)
52950diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52951index ae45bd9..c32a586 100644
52952--- a/drivers/scsi/scsi_transport_srp.c
52953+++ b/drivers/scsi/scsi_transport_srp.c
52954@@ -35,7 +35,7 @@
52955 #include "scsi_priv.h"
52956
52957 struct srp_host_attrs {
52958- atomic_t next_port_id;
52959+ atomic_unchecked_t next_port_id;
52960 };
52961 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52962
52963@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52964 struct Scsi_Host *shost = dev_to_shost(dev);
52965 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52966
52967- atomic_set(&srp_host->next_port_id, 0);
52968+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52969 return 0;
52970 }
52971
52972@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52973 rport_fast_io_fail_timedout);
52974 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52975
52976- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52977+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52978 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52979
52980 transport_setup_device(&rport->dev);
52981diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52982index 2c2041c..9d94085 100644
52983--- a/drivers/scsi/sd.c
52984+++ b/drivers/scsi/sd.c
52985@@ -3002,7 +3002,7 @@ static int sd_probe(struct device *dev)
52986 sdkp->disk = gd;
52987 sdkp->index = index;
52988 atomic_set(&sdkp->openers, 0);
52989- atomic_set(&sdkp->device->ioerr_cnt, 0);
52990+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52991
52992 if (!sdp->request_queue->rq_timeout) {
52993 if (sdp->type != TYPE_MOD)
52994diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52995index 01cf888..59e0475 100644
52996--- a/drivers/scsi/sg.c
52997+++ b/drivers/scsi/sg.c
52998@@ -1138,7 +1138,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52999 sdp->disk->disk_name,
53000 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53001 NULL,
53002- (char *)arg);
53003+ (char __user *)arg);
53004 case BLKTRACESTART:
53005 return blk_trace_startstop(sdp->device->request_queue, 1);
53006 case BLKTRACESTOP:
53007diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
53008index 11a5043..e36f04c 100644
53009--- a/drivers/soc/tegra/fuse/fuse-tegra.c
53010+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
53011@@ -70,7 +70,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
53012 return i;
53013 }
53014
53015-static struct bin_attribute fuse_bin_attr = {
53016+static bin_attribute_no_const fuse_bin_attr = {
53017 .attr = { .name = "fuse", .mode = S_IRUGO, },
53018 .read = fuse_read,
53019 };
53020diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53021index ca935df..ae8a3dc 100644
53022--- a/drivers/spi/spi.c
53023+++ b/drivers/spi/spi.c
53024@@ -2210,7 +2210,7 @@ int spi_bus_unlock(struct spi_master *master)
53025 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53026
53027 /* portable code must never pass more than 32 bytes */
53028-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53029+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53030
53031 static u8 *buf;
53032
53033diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53034index b41429f..2de5373 100644
53035--- a/drivers/staging/android/timed_output.c
53036+++ b/drivers/staging/android/timed_output.c
53037@@ -25,7 +25,7 @@
53038 #include "timed_output.h"
53039
53040 static struct class *timed_output_class;
53041-static atomic_t device_count;
53042+static atomic_unchecked_t device_count;
53043
53044 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53045 char *buf)
53046@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
53047 timed_output_class = class_create(THIS_MODULE, "timed_output");
53048 if (IS_ERR(timed_output_class))
53049 return PTR_ERR(timed_output_class);
53050- atomic_set(&device_count, 0);
53051+ atomic_set_unchecked(&device_count, 0);
53052 timed_output_class->dev_groups = timed_output_groups;
53053 }
53054
53055@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53056 if (ret < 0)
53057 return ret;
53058
53059- tdev->index = atomic_inc_return(&device_count);
53060+ tdev->index = atomic_inc_return_unchecked(&device_count);
53061 tdev->dev = device_create(timed_output_class, NULL,
53062 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53063 if (IS_ERR(tdev->dev))
53064diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53065index 001348c..cfaac8a 100644
53066--- a/drivers/staging/gdm724x/gdm_tty.c
53067+++ b/drivers/staging/gdm724x/gdm_tty.c
53068@@ -44,7 +44,7 @@
53069 #define gdm_tty_send_control(n, r, v, d, l) (\
53070 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53071
53072-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53073+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53074
53075 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53076 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53077diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53078index 6b22106..6c6e641 100644
53079--- a/drivers/staging/imx-drm/imx-drm-core.c
53080+++ b/drivers/staging/imx-drm/imx-drm-core.c
53081@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53082 if (imxdrm->pipes >= MAX_CRTC)
53083 return -EINVAL;
53084
53085- if (imxdrm->drm->open_count)
53086+ if (local_read(&imxdrm->drm->open_count))
53087 return -EBUSY;
53088
53089 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
53090diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53091index bcce919..f30fcf9 100644
53092--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53093+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53094@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53095 return 0;
53096 }
53097
53098-sfw_test_client_ops_t brw_test_client;
53099-void brw_init_test_client(void)
53100-{
53101- brw_test_client.tso_init = brw_client_init;
53102- brw_test_client.tso_fini = brw_client_fini;
53103- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53104- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53105+sfw_test_client_ops_t brw_test_client = {
53106+ .tso_init = brw_client_init,
53107+ .tso_fini = brw_client_fini,
53108+ .tso_prep_rpc = brw_client_prep_rpc,
53109+ .tso_done_rpc = brw_client_done_rpc,
53110 };
53111
53112 srpc_service_t brw_test_service;
53113diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53114index 7e83dff..1f9a545 100644
53115--- a/drivers/staging/lustre/lnet/selftest/framework.c
53116+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53117@@ -1633,12 +1633,10 @@ static srpc_service_t sfw_services[] =
53118
53119 extern sfw_test_client_ops_t ping_test_client;
53120 extern srpc_service_t ping_test_service;
53121-extern void ping_init_test_client(void);
53122 extern void ping_init_test_service(void);
53123
53124 extern sfw_test_client_ops_t brw_test_client;
53125 extern srpc_service_t brw_test_service;
53126-extern void brw_init_test_client(void);
53127 extern void brw_init_test_service(void);
53128
53129
53130@@ -1682,12 +1680,10 @@ sfw_startup (void)
53131 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53132 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53133
53134- brw_init_test_client();
53135 brw_init_test_service();
53136 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53137 LASSERT (rc == 0);
53138
53139- ping_init_test_client();
53140 ping_init_test_service();
53141 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53142 LASSERT (rc == 0);
53143diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53144index 750cac4..e4d751f 100644
53145--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53146+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53147@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53148 return 0;
53149 }
53150
53151-sfw_test_client_ops_t ping_test_client;
53152-void ping_init_test_client(void)
53153-{
53154- ping_test_client.tso_init = ping_client_init;
53155- ping_test_client.tso_fini = ping_client_fini;
53156- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53157- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53158-}
53159+sfw_test_client_ops_t ping_test_client = {
53160+ .tso_init = ping_client_init,
53161+ .tso_fini = ping_client_fini,
53162+ .tso_prep_rpc = ping_client_prep_rpc,
53163+ .tso_done_rpc = ping_client_done_rpc,
53164+};
53165
53166 srpc_service_t ping_test_service;
53167 void ping_init_test_service(void)
53168diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53169index 30b1812f..9e5bd0b 100644
53170--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53171+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53172@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53173 ldlm_completion_callback lcs_completion;
53174 ldlm_blocking_callback lcs_blocking;
53175 ldlm_glimpse_callback lcs_glimpse;
53176-};
53177+} __no_const;
53178
53179 /* ldlm_lockd.c */
53180 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53181diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53182index 489bdd3..65058081 100644
53183--- a/drivers/staging/lustre/lustre/include/obd.h
53184+++ b/drivers/staging/lustre/lustre/include/obd.h
53185@@ -1438,7 +1438,7 @@ struct md_ops {
53186 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53187 * wrapper function in include/linux/obd_class.h.
53188 */
53189-};
53190+} __no_const;
53191
53192 struct lsm_operations {
53193 void (*lsm_free)(struct lov_stripe_md *);
53194diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53195index b798daa..b28ca8f 100644
53196--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53197+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53198@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53199 int added = (mode == LCK_NL);
53200 int overlaps = 0;
53201 int splitted = 0;
53202- const struct ldlm_callback_suite null_cbs = { NULL };
53203+ const struct ldlm_callback_suite null_cbs = { };
53204
53205 CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53206 *flags, new->l_policy_data.l_flock.owner,
53207diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53208index 13a9266..3439390 100644
53209--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53210+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53211@@ -235,7 +235,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
53212 void __user *buffer, size_t *lenp, loff_t *ppos)
53213 {
53214 int rc, max_delay_cs;
53215- struct ctl_table dummy = *table;
53216+ ctl_table_no_const dummy = *table;
53217 long d;
53218
53219 dummy.data = &max_delay_cs;
53220@@ -267,7 +267,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
53221 void __user *buffer, size_t *lenp, loff_t *ppos)
53222 {
53223 int rc, min_delay_cs;
53224- struct ctl_table dummy = *table;
53225+ ctl_table_no_const dummy = *table;
53226 long d;
53227
53228 dummy.data = &min_delay_cs;
53229@@ -299,7 +299,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
53230 void __user *buffer, size_t *lenp, loff_t *ppos)
53231 {
53232 int rc, backoff;
53233- struct ctl_table dummy = *table;
53234+ ctl_table_no_const dummy = *table;
53235
53236 dummy.data = &backoff;
53237 dummy.proc_handler = &proc_dointvec;
53238diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53239index 3396858..c0bd996 100644
53240--- a/drivers/staging/lustre/lustre/libcfs/module.c
53241+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53242@@ -314,11 +314,11 @@ out:
53243
53244
53245 struct cfs_psdev_ops libcfs_psdev_ops = {
53246- libcfs_psdev_open,
53247- libcfs_psdev_release,
53248- NULL,
53249- NULL,
53250- libcfs_ioctl
53251+ .p_open = libcfs_psdev_open,
53252+ .p_close = libcfs_psdev_release,
53253+ .p_read = NULL,
53254+ .p_write = NULL,
53255+ .p_ioctl = libcfs_ioctl
53256 };
53257
53258 extern int insert_proc(void);
53259diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53260index efa2faf..03a9836 100644
53261--- a/drivers/staging/lustre/lustre/llite/dir.c
53262+++ b/drivers/staging/lustre/lustre/llite/dir.c
53263@@ -659,7 +659,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53264 int mode;
53265 int err;
53266
53267- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53268+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53269 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53270 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53271 lump);
53272diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53273index a0f4868..139f1fb 100644
53274--- a/drivers/staging/octeon/ethernet-rx.c
53275+++ b/drivers/staging/octeon/ethernet-rx.c
53276@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53277 /* Increment RX stats for virtual ports */
53278 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53279 #ifdef CONFIG_64BIT
53280- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53281- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53282+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53283+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53284 #else
53285- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53286- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53287+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53288+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53289 #endif
53290 }
53291 netif_receive_skb(skb);
53292@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53293 dev->name);
53294 */
53295 #ifdef CONFIG_64BIT
53296- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53297+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53298 #else
53299- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53300+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53301 #endif
53302 dev_kfree_skb_irq(skb);
53303 }
53304diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53305index 2aa7235..ba3c205 100644
53306--- a/drivers/staging/octeon/ethernet.c
53307+++ b/drivers/staging/octeon/ethernet.c
53308@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53309 * since the RX tasklet also increments it.
53310 */
53311 #ifdef CONFIG_64BIT
53312- atomic64_add(rx_status.dropped_packets,
53313- (atomic64_t *)&priv->stats.rx_dropped);
53314+ atomic64_add_unchecked(rx_status.dropped_packets,
53315+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53316 #else
53317- atomic_add(rx_status.dropped_packets,
53318- (atomic_t *)&priv->stats.rx_dropped);
53319+ atomic_add_unchecked(rx_status.dropped_packets,
53320+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53321 #endif
53322 }
53323
53324diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53325index 56d5c50..a14f4db 100644
53326--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53327+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53328@@ -234,7 +234,7 @@ struct hal_ops {
53329
53330 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53331 void (*hal_reset_security_engine)(struct adapter *adapter);
53332-};
53333+} __no_const;
53334
53335 enum rt_eeprom_type {
53336 EEPROM_93C46,
53337diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53338index dc23395..cf7e9b1 100644
53339--- a/drivers/staging/rtl8712/rtl871x_io.h
53340+++ b/drivers/staging/rtl8712/rtl871x_io.h
53341@@ -108,7 +108,7 @@ struct _io_ops {
53342 u8 *pmem);
53343 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53344 u8 *pmem);
53345-};
53346+} __no_const;
53347
53348 struct io_req {
53349 struct list_head list;
53350diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53351index 2bf2e2f..84421c9 100644
53352--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53353+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53354@@ -228,7 +228,7 @@ typedef struct {
53355 void (*device_resume)(ulong busNo, ulong devNo);
53356 int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
53357 ulong *maxSize);
53358-} VISORCHIPSET_BUSDEV_NOTIFIERS;
53359+} __no_const VISORCHIPSET_BUSDEV_NOTIFIERS;
53360
53361 /* These functions live inside visorchipset, and will be called to indicate
53362 * responses to specific events (by code outside of visorchipset).
53363@@ -243,7 +243,7 @@ typedef struct {
53364 void (*device_destroy)(ulong busNo, ulong devNo, int response);
53365 void (*device_pause)(ulong busNo, ulong devNo, int response);
53366 void (*device_resume)(ulong busNo, ulong devNo, int response);
53367-} VISORCHIPSET_BUSDEV_RESPONDERS;
53368+} __no_const VISORCHIPSET_BUSDEV_RESPONDERS;
53369
53370 /** Register functions (in the bus driver) to get called by visorchipset
53371 * whenever a bus or device appears for which this service partition is
53372diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53373index 164136b..7244df5 100644
53374--- a/drivers/staging/vt6655/hostap.c
53375+++ b/drivers/staging/vt6655/hostap.c
53376@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53377 *
53378 */
53379
53380+static net_device_ops_no_const apdev_netdev_ops;
53381+
53382 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53383 {
53384 PSDevice apdev_priv;
53385 struct net_device *dev = pDevice->dev;
53386 int ret;
53387- const struct net_device_ops apdev_netdev_ops = {
53388- .ndo_start_xmit = pDevice->tx_80211,
53389- };
53390
53391 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53392
53393@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53394 *apdev_priv = *pDevice;
53395 eth_hw_addr_inherit(pDevice->apdev, dev);
53396
53397+ /* only half broken now */
53398+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53399 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53400
53401 pDevice->apdev->type = ARPHRD_IEEE80211;
53402diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53403index e7e9372..161f530 100644
53404--- a/drivers/target/sbp/sbp_target.c
53405+++ b/drivers/target/sbp/sbp_target.c
53406@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53407
53408 #define SESSION_MAINTENANCE_INTERVAL HZ
53409
53410-static atomic_t login_id = ATOMIC_INIT(0);
53411+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53412
53413 static void session_maintenance_work(struct work_struct *);
53414 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53415@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53416 login->lun = se_lun;
53417 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53418 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53419- login->login_id = atomic_inc_return(&login_id);
53420+ login->login_id = atomic_inc_return_unchecked(&login_id);
53421
53422 login->tgt_agt = sbp_target_agent_register(login);
53423 if (IS_ERR(login->tgt_agt)) {
53424diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53425index 98da901..bb443e8 100644
53426--- a/drivers/target/target_core_device.c
53427+++ b/drivers/target/target_core_device.c
53428@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53429 spin_lock_init(&dev->se_tmr_lock);
53430 spin_lock_init(&dev->qf_cmd_lock);
53431 sema_init(&dev->caw_sem, 1);
53432- atomic_set(&dev->dev_ordered_id, 0);
53433+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53434 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53435 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53436 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53437diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53438index 7fa62fc..abdd041 100644
53439--- a/drivers/target/target_core_transport.c
53440+++ b/drivers/target/target_core_transport.c
53441@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53442 * Used to determine when ORDERED commands should go from
53443 * Dormant to Active status.
53444 */
53445- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53446+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53447 smp_mb__after_atomic();
53448 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53449 cmd->se_ordered_id, cmd->sam_task_attr,
53450diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53451index 4b2b999..cad9fa5 100644
53452--- a/drivers/thermal/of-thermal.c
53453+++ b/drivers/thermal/of-thermal.c
53454@@ -30,6 +30,7 @@
53455 #include <linux/err.h>
53456 #include <linux/export.h>
53457 #include <linux/string.h>
53458+#include <linux/mm.h>
53459
53460 #include "thermal_core.h"
53461
53462@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53463 tz->get_trend = get_trend;
53464 tz->sensor_data = data;
53465
53466- tzd->ops->get_temp = of_thermal_get_temp;
53467- tzd->ops->get_trend = of_thermal_get_trend;
53468+ pax_open_kernel();
53469+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53470+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53471+ pax_close_kernel();
53472 mutex_unlock(&tzd->lock);
53473
53474 return tzd;
53475@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53476 return;
53477
53478 mutex_lock(&tzd->lock);
53479- tzd->ops->get_temp = NULL;
53480- tzd->ops->get_trend = NULL;
53481+ pax_open_kernel();
53482+ *(void **)&tzd->ops->get_temp = NULL;
53483+ *(void **)&tzd->ops->get_trend = NULL;
53484+ pax_close_kernel();
53485
53486 tz->get_temp = NULL;
53487 tz->get_trend = NULL;
53488diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53489index fd66f57..48e6376 100644
53490--- a/drivers/tty/cyclades.c
53491+++ b/drivers/tty/cyclades.c
53492@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53493 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53494 info->port.count);
53495 #endif
53496- info->port.count++;
53497+ atomic_inc(&info->port.count);
53498 #ifdef CY_DEBUG_COUNT
53499 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53500- current->pid, info->port.count);
53501+ current->pid, atomic_read(&info->port.count));
53502 #endif
53503
53504 /*
53505@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53506 for (j = 0; j < cy_card[i].nports; j++) {
53507 info = &cy_card[i].ports[j];
53508
53509- if (info->port.count) {
53510+ if (atomic_read(&info->port.count)) {
53511 /* XXX is the ldisc num worth this? */
53512 struct tty_struct *tty;
53513 struct tty_ldisc *ld;
53514diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53515index 4fcec1d..5a036f7 100644
53516--- a/drivers/tty/hvc/hvc_console.c
53517+++ b/drivers/tty/hvc/hvc_console.c
53518@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53519
53520 spin_lock_irqsave(&hp->port.lock, flags);
53521 /* Check and then increment for fast path open. */
53522- if (hp->port.count++ > 0) {
53523+ if (atomic_inc_return(&hp->port.count) > 1) {
53524 spin_unlock_irqrestore(&hp->port.lock, flags);
53525 hvc_kick();
53526 return 0;
53527@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53528
53529 spin_lock_irqsave(&hp->port.lock, flags);
53530
53531- if (--hp->port.count == 0) {
53532+ if (atomic_dec_return(&hp->port.count) == 0) {
53533 spin_unlock_irqrestore(&hp->port.lock, flags);
53534 /* We are done with the tty pointer now. */
53535 tty_port_tty_set(&hp->port, NULL);
53536@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53537 */
53538 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53539 } else {
53540- if (hp->port.count < 0)
53541+ if (atomic_read(&hp->port.count) < 0)
53542 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53543- hp->vtermno, hp->port.count);
53544+ hp->vtermno, atomic_read(&hp->port.count));
53545 spin_unlock_irqrestore(&hp->port.lock, flags);
53546 }
53547 }
53548@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53549 * open->hangup case this can be called after the final close so prevent
53550 * that from happening for now.
53551 */
53552- if (hp->port.count <= 0) {
53553+ if (atomic_read(&hp->port.count) <= 0) {
53554 spin_unlock_irqrestore(&hp->port.lock, flags);
53555 return;
53556 }
53557
53558- hp->port.count = 0;
53559+ atomic_set(&hp->port.count, 0);
53560 spin_unlock_irqrestore(&hp->port.lock, flags);
53561 tty_port_tty_set(&hp->port, NULL);
53562
53563@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53564 return -EPIPE;
53565
53566 /* FIXME what's this (unprotected) check for? */
53567- if (hp->port.count <= 0)
53568+ if (atomic_read(&hp->port.count) <= 0)
53569 return -EIO;
53570
53571 spin_lock_irqsave(&hp->lock, flags);
53572diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53573index 81e939e..95ead10 100644
53574--- a/drivers/tty/hvc/hvcs.c
53575+++ b/drivers/tty/hvc/hvcs.c
53576@@ -83,6 +83,7 @@
53577 #include <asm/hvcserver.h>
53578 #include <asm/uaccess.h>
53579 #include <asm/vio.h>
53580+#include <asm/local.h>
53581
53582 /*
53583 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53584@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53585
53586 spin_lock_irqsave(&hvcsd->lock, flags);
53587
53588- if (hvcsd->port.count > 0) {
53589+ if (atomic_read(&hvcsd->port.count) > 0) {
53590 spin_unlock_irqrestore(&hvcsd->lock, flags);
53591 printk(KERN_INFO "HVCS: vterm state unchanged. "
53592 "The hvcs device node is still in use.\n");
53593@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53594 }
53595 }
53596
53597- hvcsd->port.count = 0;
53598+ atomic_set(&hvcsd->port.count, 0);
53599 hvcsd->port.tty = tty;
53600 tty->driver_data = hvcsd;
53601
53602@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53603 unsigned long flags;
53604
53605 spin_lock_irqsave(&hvcsd->lock, flags);
53606- hvcsd->port.count++;
53607+ atomic_inc(&hvcsd->port.count);
53608 hvcsd->todo_mask |= HVCS_SCHED_READ;
53609 spin_unlock_irqrestore(&hvcsd->lock, flags);
53610
53611@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53612 hvcsd = tty->driver_data;
53613
53614 spin_lock_irqsave(&hvcsd->lock, flags);
53615- if (--hvcsd->port.count == 0) {
53616+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53617
53618 vio_disable_interrupts(hvcsd->vdev);
53619
53620@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53621
53622 free_irq(irq, hvcsd);
53623 return;
53624- } else if (hvcsd->port.count < 0) {
53625+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53626 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53627 " is missmanaged.\n",
53628- hvcsd->vdev->unit_address, hvcsd->port.count);
53629+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53630 }
53631
53632 spin_unlock_irqrestore(&hvcsd->lock, flags);
53633@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53634
53635 spin_lock_irqsave(&hvcsd->lock, flags);
53636 /* Preserve this so that we know how many kref refs to put */
53637- temp_open_count = hvcsd->port.count;
53638+ temp_open_count = atomic_read(&hvcsd->port.count);
53639
53640 /*
53641 * Don't kref put inside the spinlock because the destruction
53642@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53643 tty->driver_data = NULL;
53644 hvcsd->port.tty = NULL;
53645
53646- hvcsd->port.count = 0;
53647+ atomic_set(&hvcsd->port.count, 0);
53648
53649 /* This will drop any buffered data on the floor which is OK in a hangup
53650 * scenario. */
53651@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53652 * the middle of a write operation? This is a crummy place to do this
53653 * but we want to keep it all in the spinlock.
53654 */
53655- if (hvcsd->port.count <= 0) {
53656+ if (atomic_read(&hvcsd->port.count) <= 0) {
53657 spin_unlock_irqrestore(&hvcsd->lock, flags);
53658 return -ENODEV;
53659 }
53660@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53661 {
53662 struct hvcs_struct *hvcsd = tty->driver_data;
53663
53664- if (!hvcsd || hvcsd->port.count <= 0)
53665+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53666 return 0;
53667
53668 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53669diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53670index 4190199..06d5bfa 100644
53671--- a/drivers/tty/hvc/hvsi.c
53672+++ b/drivers/tty/hvc/hvsi.c
53673@@ -85,7 +85,7 @@ struct hvsi_struct {
53674 int n_outbuf;
53675 uint32_t vtermno;
53676 uint32_t virq;
53677- atomic_t seqno; /* HVSI packet sequence number */
53678+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53679 uint16_t mctrl;
53680 uint8_t state; /* HVSI protocol state */
53681 uint8_t flags;
53682@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53683
53684 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53685 packet.hdr.len = sizeof(struct hvsi_query_response);
53686- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53687+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53688 packet.verb = VSV_SEND_VERSION_NUMBER;
53689 packet.u.version = HVSI_VERSION;
53690 packet.query_seqno = query_seqno+1;
53691@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53692
53693 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53694 packet.hdr.len = sizeof(struct hvsi_query);
53695- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53696+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53697 packet.verb = verb;
53698
53699 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53700@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53701 int wrote;
53702
53703 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53704- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53705+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53706 packet.hdr.len = sizeof(struct hvsi_control);
53707 packet.verb = VSV_SET_MODEM_CTL;
53708 packet.mask = HVSI_TSDTR;
53709@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53710 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53711
53712 packet.hdr.type = VS_DATA_PACKET_HEADER;
53713- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53714+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53715 packet.hdr.len = count + sizeof(struct hvsi_header);
53716 memcpy(&packet.data, buf, count);
53717
53718@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53719 struct hvsi_control packet __ALIGNED__;
53720
53721 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53722- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53723+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53724 packet.hdr.len = 6;
53725 packet.verb = VSV_CLOSE_PROTOCOL;
53726
53727@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53728
53729 tty_port_tty_set(&hp->port, tty);
53730 spin_lock_irqsave(&hp->lock, flags);
53731- hp->port.count++;
53732+ atomic_inc(&hp->port.count);
53733 atomic_set(&hp->seqno, 0);
53734 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53735 spin_unlock_irqrestore(&hp->lock, flags);
53736@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53737
53738 spin_lock_irqsave(&hp->lock, flags);
53739
53740- if (--hp->port.count == 0) {
53741+ if (atomic_dec_return(&hp->port.count) == 0) {
53742 tty_port_tty_set(&hp->port, NULL);
53743 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53744
53745@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53746
53747 spin_lock_irqsave(&hp->lock, flags);
53748 }
53749- } else if (hp->port.count < 0)
53750+ } else if (atomic_read(&hp->port.count) < 0)
53751 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53752- hp - hvsi_ports, hp->port.count);
53753+ hp - hvsi_ports, atomic_read(&hp->port.count));
53754
53755 spin_unlock_irqrestore(&hp->lock, flags);
53756 }
53757@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53758 tty_port_tty_set(&hp->port, NULL);
53759
53760 spin_lock_irqsave(&hp->lock, flags);
53761- hp->port.count = 0;
53762+ atomic_set(&hp->port.count, 0);
53763 hp->n_outbuf = 0;
53764 spin_unlock_irqrestore(&hp->lock, flags);
53765 }
53766diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53767index 7ae6c29..05c6dba 100644
53768--- a/drivers/tty/hvc/hvsi_lib.c
53769+++ b/drivers/tty/hvc/hvsi_lib.c
53770@@ -8,7 +8,7 @@
53771
53772 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53773 {
53774- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53775+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53776
53777 /* Assumes that always succeeds, works in practice */
53778 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53779@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53780
53781 /* Reset state */
53782 pv->established = 0;
53783- atomic_set(&pv->seqno, 0);
53784+ atomic_set_unchecked(&pv->seqno, 0);
53785
53786 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53787
53788diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53789index 345cebb..d5a1e9e 100644
53790--- a/drivers/tty/ipwireless/tty.c
53791+++ b/drivers/tty/ipwireless/tty.c
53792@@ -28,6 +28,7 @@
53793 #include <linux/tty_driver.h>
53794 #include <linux/tty_flip.h>
53795 #include <linux/uaccess.h>
53796+#include <asm/local.h>
53797
53798 #include "tty.h"
53799 #include "network.h"
53800@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53801 return -ENODEV;
53802
53803 mutex_lock(&tty->ipw_tty_mutex);
53804- if (tty->port.count == 0)
53805+ if (atomic_read(&tty->port.count) == 0)
53806 tty->tx_bytes_queued = 0;
53807
53808- tty->port.count++;
53809+ atomic_inc(&tty->port.count);
53810
53811 tty->port.tty = linux_tty;
53812 linux_tty->driver_data = tty;
53813@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53814
53815 static void do_ipw_close(struct ipw_tty *tty)
53816 {
53817- tty->port.count--;
53818-
53819- if (tty->port.count == 0) {
53820+ if (atomic_dec_return(&tty->port.count) == 0) {
53821 struct tty_struct *linux_tty = tty->port.tty;
53822
53823 if (linux_tty != NULL) {
53824@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53825 return;
53826
53827 mutex_lock(&tty->ipw_tty_mutex);
53828- if (tty->port.count == 0) {
53829+ if (atomic_read(&tty->port.count) == 0) {
53830 mutex_unlock(&tty->ipw_tty_mutex);
53831 return;
53832 }
53833@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53834
53835 mutex_lock(&tty->ipw_tty_mutex);
53836
53837- if (!tty->port.count) {
53838+ if (!atomic_read(&tty->port.count)) {
53839 mutex_unlock(&tty->ipw_tty_mutex);
53840 return;
53841 }
53842@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53843 return -ENODEV;
53844
53845 mutex_lock(&tty->ipw_tty_mutex);
53846- if (!tty->port.count) {
53847+ if (!atomic_read(&tty->port.count)) {
53848 mutex_unlock(&tty->ipw_tty_mutex);
53849 return -EINVAL;
53850 }
53851@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53852 if (!tty)
53853 return -ENODEV;
53854
53855- if (!tty->port.count)
53856+ if (!atomic_read(&tty->port.count))
53857 return -EINVAL;
53858
53859 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53860@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53861 if (!tty)
53862 return 0;
53863
53864- if (!tty->port.count)
53865+ if (!atomic_read(&tty->port.count))
53866 return 0;
53867
53868 return tty->tx_bytes_queued;
53869@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53870 if (!tty)
53871 return -ENODEV;
53872
53873- if (!tty->port.count)
53874+ if (!atomic_read(&tty->port.count))
53875 return -EINVAL;
53876
53877 return get_control_lines(tty);
53878@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53879 if (!tty)
53880 return -ENODEV;
53881
53882- if (!tty->port.count)
53883+ if (!atomic_read(&tty->port.count))
53884 return -EINVAL;
53885
53886 return set_control_lines(tty, set, clear);
53887@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53888 if (!tty)
53889 return -ENODEV;
53890
53891- if (!tty->port.count)
53892+ if (!atomic_read(&tty->port.count))
53893 return -EINVAL;
53894
53895 /* FIXME: Exactly how is the tty object locked here .. */
53896@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53897 * are gone */
53898 mutex_lock(&ttyj->ipw_tty_mutex);
53899 }
53900- while (ttyj->port.count)
53901+ while (atomic_read(&ttyj->port.count))
53902 do_ipw_close(ttyj);
53903 ipwireless_disassociate_network_ttys(network,
53904 ttyj->channel_idx);
53905diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53906index 1deaca4..c8582d4 100644
53907--- a/drivers/tty/moxa.c
53908+++ b/drivers/tty/moxa.c
53909@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53910 }
53911
53912 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53913- ch->port.count++;
53914+ atomic_inc(&ch->port.count);
53915 tty->driver_data = ch;
53916 tty_port_tty_set(&ch->port, tty);
53917 mutex_lock(&ch->port.mutex);
53918diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53919index c434376..114ce13 100644
53920--- a/drivers/tty/n_gsm.c
53921+++ b/drivers/tty/n_gsm.c
53922@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53923 spin_lock_init(&dlci->lock);
53924 mutex_init(&dlci->mutex);
53925 dlci->fifo = &dlci->_fifo;
53926- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53927+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53928 kfree(dlci);
53929 return NULL;
53930 }
53931@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53932 struct gsm_dlci *dlci = tty->driver_data;
53933 struct tty_port *port = &dlci->port;
53934
53935- port->count++;
53936+ atomic_inc(&port->count);
53937 tty_port_tty_set(port, tty);
53938
53939 dlci->modem_rx = 0;
53940diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53941index f44f1ba..a8d5915 100644
53942--- a/drivers/tty/n_tty.c
53943+++ b/drivers/tty/n_tty.c
53944@@ -115,7 +115,7 @@ struct n_tty_data {
53945 int minimum_to_wake;
53946
53947 /* consumer-published */
53948- size_t read_tail;
53949+ size_t read_tail __intentional_overflow(-1);
53950 size_t line_start;
53951
53952 /* protected by output lock */
53953@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53954 {
53955 *ops = tty_ldisc_N_TTY;
53956 ops->owner = NULL;
53957- ops->refcount = ops->flags = 0;
53958+ atomic_set(&ops->refcount, 0);
53959+ ops->flags = 0;
53960 }
53961 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
53962diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53963index 9bbdb1d..dc514ee 100644
53964--- a/drivers/tty/pty.c
53965+++ b/drivers/tty/pty.c
53966@@ -789,8 +789,10 @@ static void __init unix98_pty_init(void)
53967 panic("Couldn't register Unix98 pts driver");
53968
53969 /* Now create the /dev/ptmx special device */
53970+ pax_open_kernel();
53971 tty_default_fops(&ptmx_fops);
53972- ptmx_fops.open = ptmx_open;
53973+ *(void **)&ptmx_fops.open = ptmx_open;
53974+ pax_close_kernel();
53975
53976 cdev_init(&ptmx_cdev, &ptmx_fops);
53977 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53978diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53979index 383c4c7..d408e21 100644
53980--- a/drivers/tty/rocket.c
53981+++ b/drivers/tty/rocket.c
53982@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53983 tty->driver_data = info;
53984 tty_port_tty_set(port, tty);
53985
53986- if (port->count++ == 0) {
53987+ if (atomic_inc_return(&port->count) == 1) {
53988 atomic_inc(&rp_num_ports_open);
53989
53990 #ifdef ROCKET_DEBUG_OPEN
53991@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53992 #endif
53993 }
53994 #ifdef ROCKET_DEBUG_OPEN
53995- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53996+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53997 #endif
53998
53999 /*
54000@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54001 spin_unlock_irqrestore(&info->port.lock, flags);
54002 return;
54003 }
54004- if (info->port.count)
54005+ if (atomic_read(&info->port.count))
54006 atomic_dec(&rp_num_ports_open);
54007 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54008 spin_unlock_irqrestore(&info->port.lock, flags);
54009diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54010index aa28209..e08fb85 100644
54011--- a/drivers/tty/serial/ioc4_serial.c
54012+++ b/drivers/tty/serial/ioc4_serial.c
54013@@ -437,7 +437,7 @@ struct ioc4_soft {
54014 } is_intr_info[MAX_IOC4_INTR_ENTS];
54015
54016 /* Number of entries active in the above array */
54017- atomic_t is_num_intrs;
54018+ atomic_unchecked_t is_num_intrs;
54019 } is_intr_type[IOC4_NUM_INTR_TYPES];
54020
54021 /* is_ir_lock must be held while
54022@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54023 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54024 || (type == IOC4_OTHER_INTR_TYPE)));
54025
54026- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54027+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54028 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54029
54030 /* Save off the lower level interrupt handler */
54031@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54032
54033 soft = arg;
54034 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54035- num_intrs = (int)atomic_read(
54036+ num_intrs = (int)atomic_read_unchecked(
54037 &soft->is_intr_type[intr_type].is_num_intrs);
54038
54039 this_mir = this_ir = pending_intrs(soft, intr_type);
54040diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54041index 6ec7501..265bcbf 100644
54042--- a/drivers/tty/serial/kgdb_nmi.c
54043+++ b/drivers/tty/serial/kgdb_nmi.c
54044@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54045 * I/O utilities that messages sent to the console will automatically
54046 * be displayed on the dbg_io.
54047 */
54048- dbg_io_ops->is_console = true;
54049+ pax_open_kernel();
54050+ *(int *)&dbg_io_ops->is_console = true;
54051+ pax_close_kernel();
54052
54053 return 0;
54054 }
54055diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54056index a260cde..6b2b5ce 100644
54057--- a/drivers/tty/serial/kgdboc.c
54058+++ b/drivers/tty/serial/kgdboc.c
54059@@ -24,8 +24,9 @@
54060 #define MAX_CONFIG_LEN 40
54061
54062 static struct kgdb_io kgdboc_io_ops;
54063+static struct kgdb_io kgdboc_io_ops_console;
54064
54065-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54066+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54067 static int configured = -1;
54068
54069 static char config[MAX_CONFIG_LEN];
54070@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54071 kgdboc_unregister_kbd();
54072 if (configured == 1)
54073 kgdb_unregister_io_module(&kgdboc_io_ops);
54074+ else if (configured == 2)
54075+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54076 }
54077
54078 static int configure_kgdboc(void)
54079@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54080 int err;
54081 char *cptr = config;
54082 struct console *cons;
54083+ int is_console = 0;
54084
54085 err = kgdboc_option_setup(config);
54086 if (err || !strlen(config) || isspace(config[0]))
54087 goto noconfig;
54088
54089 err = -ENODEV;
54090- kgdboc_io_ops.is_console = 0;
54091 kgdb_tty_driver = NULL;
54092
54093 kgdboc_use_kms = 0;
54094@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54095 int idx;
54096 if (cons->device && cons->device(cons, &idx) == p &&
54097 idx == tty_line) {
54098- kgdboc_io_ops.is_console = 1;
54099+ is_console = 1;
54100 break;
54101 }
54102 cons = cons->next;
54103@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54104 kgdb_tty_line = tty_line;
54105
54106 do_register:
54107- err = kgdb_register_io_module(&kgdboc_io_ops);
54108+ if (is_console) {
54109+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54110+ configured = 2;
54111+ } else {
54112+ err = kgdb_register_io_module(&kgdboc_io_ops);
54113+ configured = 1;
54114+ }
54115 if (err)
54116 goto noconfig;
54117
54118@@ -205,8 +214,6 @@ do_register:
54119 if (err)
54120 goto nmi_con_failed;
54121
54122- configured = 1;
54123-
54124 return 0;
54125
54126 nmi_con_failed:
54127@@ -223,7 +230,7 @@ noconfig:
54128 static int __init init_kgdboc(void)
54129 {
54130 /* Already configured? */
54131- if (configured == 1)
54132+ if (configured >= 1)
54133 return 0;
54134
54135 return configure_kgdboc();
54136@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54137 if (config[len - 1] == '\n')
54138 config[len - 1] = '\0';
54139
54140- if (configured == 1)
54141+ if (configured >= 1)
54142 cleanup_kgdboc();
54143
54144 /* Go and configure with the new params. */
54145@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54146 .post_exception = kgdboc_post_exp_handler,
54147 };
54148
54149+static struct kgdb_io kgdboc_io_ops_console = {
54150+ .name = "kgdboc",
54151+ .read_char = kgdboc_get_char,
54152+ .write_char = kgdboc_put_char,
54153+ .pre_exception = kgdboc_pre_exp_handler,
54154+ .post_exception = kgdboc_post_exp_handler,
54155+ .is_console = 1
54156+};
54157+
54158 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54159 /* This is only available if kgdboc is a built in for early debugging */
54160 static int __init kgdboc_early_init(char *opt)
54161diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54162index 0da0b54..80ae306 100644
54163--- a/drivers/tty/serial/msm_serial.c
54164+++ b/drivers/tty/serial/msm_serial.c
54165@@ -989,7 +989,7 @@ static struct uart_driver msm_uart_driver = {
54166 .cons = MSM_CONSOLE,
54167 };
54168
54169-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54170+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54171
54172 static const struct of_device_id msm_uartdm_table[] = {
54173 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54174@@ -1008,7 +1008,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54175 int irq;
54176
54177 if (pdev->id == -1)
54178- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54179+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54180
54181 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54182 return -ENXIO;
54183diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54184index c78f43a..22b1dab 100644
54185--- a/drivers/tty/serial/samsung.c
54186+++ b/drivers/tty/serial/samsung.c
54187@@ -478,11 +478,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54188 }
54189 }
54190
54191+static int s3c64xx_serial_startup(struct uart_port *port);
54192 static int s3c24xx_serial_startup(struct uart_port *port)
54193 {
54194 struct s3c24xx_uart_port *ourport = to_ourport(port);
54195 int ret;
54196
54197+ /* Startup sequence is different for s3c64xx and higher SoC's */
54198+ if (s3c24xx_serial_has_interrupt_mask(port))
54199+ return s3c64xx_serial_startup(port);
54200+
54201 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54202 port, (unsigned long long)port->mapbase, port->membase);
54203
54204@@ -1155,10 +1160,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54205 /* setup info for port */
54206 port->dev = &platdev->dev;
54207
54208- /* Startup sequence is different for s3c64xx and higher SoC's */
54209- if (s3c24xx_serial_has_interrupt_mask(port))
54210- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54211-
54212 port->uartclk = 1;
54213
54214 if (cfg->uart_flags & UPF_CONS_FLOW) {
54215diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54216index 29a7be4..0144e62 100644
54217--- a/drivers/tty/serial/serial_core.c
54218+++ b/drivers/tty/serial/serial_core.c
54219@@ -1343,7 +1343,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54220
54221 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54222
54223- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54224+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54225 return;
54226
54227 /*
54228@@ -1470,7 +1470,7 @@ static void uart_hangup(struct tty_struct *tty)
54229 uart_flush_buffer(tty);
54230 uart_shutdown(tty, state);
54231 spin_lock_irqsave(&port->lock, flags);
54232- port->count = 0;
54233+ atomic_set(&port->count, 0);
54234 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54235 spin_unlock_irqrestore(&port->lock, flags);
54236 tty_port_tty_set(port, NULL);
54237@@ -1568,7 +1568,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54238 goto end;
54239 }
54240
54241- port->count++;
54242+ atomic_inc(&port->count);
54243 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54244 retval = -ENXIO;
54245 goto err_dec_count;
54246@@ -1600,7 +1600,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54247 end:
54248 return retval;
54249 err_dec_count:
54250- port->count--;
54251+ atomic_dec(&port->count);
54252 mutex_unlock(&port->mutex);
54253 goto end;
54254 }
54255diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54256index b799170..87dafd5 100644
54257--- a/drivers/tty/synclink.c
54258+++ b/drivers/tty/synclink.c
54259@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54260
54261 if (debug_level >= DEBUG_LEVEL_INFO)
54262 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54263- __FILE__,__LINE__, info->device_name, info->port.count);
54264+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54265
54266 if (tty_port_close_start(&info->port, tty, filp) == 0)
54267 goto cleanup;
54268@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54269 cleanup:
54270 if (debug_level >= DEBUG_LEVEL_INFO)
54271 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54272- tty->driver->name, info->port.count);
54273+ tty->driver->name, atomic_read(&info->port.count));
54274
54275 } /* end of mgsl_close() */
54276
54277@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54278
54279 mgsl_flush_buffer(tty);
54280 shutdown(info);
54281-
54282- info->port.count = 0;
54283+
54284+ atomic_set(&info->port.count, 0);
54285 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54286 info->port.tty = NULL;
54287
54288@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54289
54290 if (debug_level >= DEBUG_LEVEL_INFO)
54291 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54292- __FILE__,__LINE__, tty->driver->name, port->count );
54293+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54294
54295 spin_lock_irqsave(&info->irq_spinlock, flags);
54296- port->count--;
54297+ atomic_dec(&port->count);
54298 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54299 port->blocked_open++;
54300
54301@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54302
54303 if (debug_level >= DEBUG_LEVEL_INFO)
54304 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54305- __FILE__,__LINE__, tty->driver->name, port->count );
54306+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54307
54308 tty_unlock(tty);
54309 schedule();
54310@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54311
54312 /* FIXME: Racy on hangup during close wait */
54313 if (!tty_hung_up_p(filp))
54314- port->count++;
54315+ atomic_inc(&port->count);
54316 port->blocked_open--;
54317
54318 if (debug_level >= DEBUG_LEVEL_INFO)
54319 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54320- __FILE__,__LINE__, tty->driver->name, port->count );
54321+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54322
54323 if (!retval)
54324 port->flags |= ASYNC_NORMAL_ACTIVE;
54325@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54326
54327 if (debug_level >= DEBUG_LEVEL_INFO)
54328 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54329- __FILE__,__LINE__,tty->driver->name, info->port.count);
54330+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54331
54332 /* If port is closing, signal caller to try again */
54333 if (info->port.flags & ASYNC_CLOSING){
54334@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54335 spin_unlock_irqrestore(&info->netlock, flags);
54336 goto cleanup;
54337 }
54338- info->port.count++;
54339+ atomic_inc(&info->port.count);
54340 spin_unlock_irqrestore(&info->netlock, flags);
54341
54342- if (info->port.count == 1) {
54343+ if (atomic_read(&info->port.count) == 1) {
54344 /* 1st open on this device, init hardware */
54345 retval = startup(info);
54346 if (retval < 0)
54347@@ -3442,8 +3442,8 @@ cleanup:
54348 if (retval) {
54349 if (tty->count == 1)
54350 info->port.tty = NULL; /* tty layer will release tty struct */
54351- if(info->port.count)
54352- info->port.count--;
54353+ if (atomic_read(&info->port.count))
54354+ atomic_dec(&info->port.count);
54355 }
54356
54357 return retval;
54358@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54359 unsigned short new_crctype;
54360
54361 /* return error if TTY interface open */
54362- if (info->port.count)
54363+ if (atomic_read(&info->port.count))
54364 return -EBUSY;
54365
54366 switch (encoding)
54367@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54368
54369 /* arbitrate between network and tty opens */
54370 spin_lock_irqsave(&info->netlock, flags);
54371- if (info->port.count != 0 || info->netcount != 0) {
54372+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54373 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54374 spin_unlock_irqrestore(&info->netlock, flags);
54375 return -EBUSY;
54376@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54377 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54378
54379 /* return error if TTY interface open */
54380- if (info->port.count)
54381+ if (atomic_read(&info->port.count))
54382 return -EBUSY;
54383
54384 if (cmd != SIOCWANDEV)
54385diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54386index 0e8c39b..e0cb171 100644
54387--- a/drivers/tty/synclink_gt.c
54388+++ b/drivers/tty/synclink_gt.c
54389@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54390 tty->driver_data = info;
54391 info->port.tty = tty;
54392
54393- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54394+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54395
54396 /* If port is closing, signal caller to try again */
54397 if (info->port.flags & ASYNC_CLOSING){
54398@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54399 mutex_unlock(&info->port.mutex);
54400 goto cleanup;
54401 }
54402- info->port.count++;
54403+ atomic_inc(&info->port.count);
54404 spin_unlock_irqrestore(&info->netlock, flags);
54405
54406- if (info->port.count == 1) {
54407+ if (atomic_read(&info->port.count) == 1) {
54408 /* 1st open on this device, init hardware */
54409 retval = startup(info);
54410 if (retval < 0) {
54411@@ -715,8 +715,8 @@ cleanup:
54412 if (retval) {
54413 if (tty->count == 1)
54414 info->port.tty = NULL; /* tty layer will release tty struct */
54415- if(info->port.count)
54416- info->port.count--;
54417+ if(atomic_read(&info->port.count))
54418+ atomic_dec(&info->port.count);
54419 }
54420
54421 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54422@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54423
54424 if (sanity_check(info, tty->name, "close"))
54425 return;
54426- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54427+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54428
54429 if (tty_port_close_start(&info->port, tty, filp) == 0)
54430 goto cleanup;
54431@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54432 tty_port_close_end(&info->port, tty);
54433 info->port.tty = NULL;
54434 cleanup:
54435- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54436+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54437 }
54438
54439 static void hangup(struct tty_struct *tty)
54440@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54441 shutdown(info);
54442
54443 spin_lock_irqsave(&info->port.lock, flags);
54444- info->port.count = 0;
54445+ atomic_set(&info->port.count, 0);
54446 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54447 info->port.tty = NULL;
54448 spin_unlock_irqrestore(&info->port.lock, flags);
54449@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54450 unsigned short new_crctype;
54451
54452 /* return error if TTY interface open */
54453- if (info->port.count)
54454+ if (atomic_read(&info->port.count))
54455 return -EBUSY;
54456
54457 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54458@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54459
54460 /* arbitrate between network and tty opens */
54461 spin_lock_irqsave(&info->netlock, flags);
54462- if (info->port.count != 0 || info->netcount != 0) {
54463+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54464 DBGINFO(("%s hdlc_open busy\n", dev->name));
54465 spin_unlock_irqrestore(&info->netlock, flags);
54466 return -EBUSY;
54467@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54468 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54469
54470 /* return error if TTY interface open */
54471- if (info->port.count)
54472+ if (atomic_read(&info->port.count))
54473 return -EBUSY;
54474
54475 if (cmd != SIOCWANDEV)
54476@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54477 if (port == NULL)
54478 continue;
54479 spin_lock(&port->lock);
54480- if ((port->port.count || port->netcount) &&
54481+ if ((atomic_read(&port->port.count) || port->netcount) &&
54482 port->pending_bh && !port->bh_running &&
54483 !port->bh_requested) {
54484 DBGISR(("%s bh queued\n", port->device_name));
54485@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54486 add_wait_queue(&port->open_wait, &wait);
54487
54488 spin_lock_irqsave(&info->lock, flags);
54489- port->count--;
54490+ atomic_dec(&port->count);
54491 spin_unlock_irqrestore(&info->lock, flags);
54492 port->blocked_open++;
54493
54494@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54495 remove_wait_queue(&port->open_wait, &wait);
54496
54497 if (!tty_hung_up_p(filp))
54498- port->count++;
54499+ atomic_inc(&port->count);
54500 port->blocked_open--;
54501
54502 if (!retval)
54503diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54504index c3f9091..abe4601 100644
54505--- a/drivers/tty/synclinkmp.c
54506+++ b/drivers/tty/synclinkmp.c
54507@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54508
54509 if (debug_level >= DEBUG_LEVEL_INFO)
54510 printk("%s(%d):%s open(), old ref count = %d\n",
54511- __FILE__,__LINE__,tty->driver->name, info->port.count);
54512+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54513
54514 /* If port is closing, signal caller to try again */
54515 if (info->port.flags & ASYNC_CLOSING){
54516@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54517 spin_unlock_irqrestore(&info->netlock, flags);
54518 goto cleanup;
54519 }
54520- info->port.count++;
54521+ atomic_inc(&info->port.count);
54522 spin_unlock_irqrestore(&info->netlock, flags);
54523
54524- if (info->port.count == 1) {
54525+ if (atomic_read(&info->port.count) == 1) {
54526 /* 1st open on this device, init hardware */
54527 retval = startup(info);
54528 if (retval < 0)
54529@@ -796,8 +796,8 @@ cleanup:
54530 if (retval) {
54531 if (tty->count == 1)
54532 info->port.tty = NULL; /* tty layer will release tty struct */
54533- if(info->port.count)
54534- info->port.count--;
54535+ if(atomic_read(&info->port.count))
54536+ atomic_dec(&info->port.count);
54537 }
54538
54539 return retval;
54540@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54541
54542 if (debug_level >= DEBUG_LEVEL_INFO)
54543 printk("%s(%d):%s close() entry, count=%d\n",
54544- __FILE__,__LINE__, info->device_name, info->port.count);
54545+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54546
54547 if (tty_port_close_start(&info->port, tty, filp) == 0)
54548 goto cleanup;
54549@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54550 cleanup:
54551 if (debug_level >= DEBUG_LEVEL_INFO)
54552 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54553- tty->driver->name, info->port.count);
54554+ tty->driver->name, atomic_read(&info->port.count));
54555 }
54556
54557 /* Called by tty_hangup() when a hangup is signaled.
54558@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54559 shutdown(info);
54560
54561 spin_lock_irqsave(&info->port.lock, flags);
54562- info->port.count = 0;
54563+ atomic_set(&info->port.count, 0);
54564 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54565 info->port.tty = NULL;
54566 spin_unlock_irqrestore(&info->port.lock, flags);
54567@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54568 unsigned short new_crctype;
54569
54570 /* return error if TTY interface open */
54571- if (info->port.count)
54572+ if (atomic_read(&info->port.count))
54573 return -EBUSY;
54574
54575 switch (encoding)
54576@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54577
54578 /* arbitrate between network and tty opens */
54579 spin_lock_irqsave(&info->netlock, flags);
54580- if (info->port.count != 0 || info->netcount != 0) {
54581+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54582 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54583 spin_unlock_irqrestore(&info->netlock, flags);
54584 return -EBUSY;
54585@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54586 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54587
54588 /* return error if TTY interface open */
54589- if (info->port.count)
54590+ if (atomic_read(&info->port.count))
54591 return -EBUSY;
54592
54593 if (cmd != SIOCWANDEV)
54594@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54595 * do not request bottom half processing if the
54596 * device is not open in a normal mode.
54597 */
54598- if ( port && (port->port.count || port->netcount) &&
54599+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54600 port->pending_bh && !port->bh_running &&
54601 !port->bh_requested ) {
54602 if ( debug_level >= DEBUG_LEVEL_ISR )
54603@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54604
54605 if (debug_level >= DEBUG_LEVEL_INFO)
54606 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54607- __FILE__,__LINE__, tty->driver->name, port->count );
54608+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54609
54610 spin_lock_irqsave(&info->lock, flags);
54611- port->count--;
54612+ atomic_dec(&port->count);
54613 spin_unlock_irqrestore(&info->lock, flags);
54614 port->blocked_open++;
54615
54616@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54617
54618 if (debug_level >= DEBUG_LEVEL_INFO)
54619 printk("%s(%d):%s block_til_ready() count=%d\n",
54620- __FILE__,__LINE__, tty->driver->name, port->count );
54621+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54622
54623 tty_unlock(tty);
54624 schedule();
54625@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54626 set_current_state(TASK_RUNNING);
54627 remove_wait_queue(&port->open_wait, &wait);
54628 if (!tty_hung_up_p(filp))
54629- port->count++;
54630+ atomic_inc(&port->count);
54631 port->blocked_open--;
54632
54633 if (debug_level >= DEBUG_LEVEL_INFO)
54634 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54635- __FILE__,__LINE__, tty->driver->name, port->count );
54636+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54637
54638 if (!retval)
54639 port->flags |= ASYNC_NORMAL_ACTIVE;
54640diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54641index 42bad18..447d7a2 100644
54642--- a/drivers/tty/sysrq.c
54643+++ b/drivers/tty/sysrq.c
54644@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54645 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54646 size_t count, loff_t *ppos)
54647 {
54648- if (count) {
54649+ if (count && capable(CAP_SYS_ADMIN)) {
54650 char c;
54651
54652 if (get_user(c, buf))
54653diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54654index 8fbad34..0db0a39 100644
54655--- a/drivers/tty/tty_io.c
54656+++ b/drivers/tty/tty_io.c
54657@@ -3464,7 +3464,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
54658
54659 void tty_default_fops(struct file_operations *fops)
54660 {
54661- *fops = tty_fops;
54662+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54663 }
54664
54665 /*
54666diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54667index 2d822aa..a566234 100644
54668--- a/drivers/tty/tty_ldisc.c
54669+++ b/drivers/tty/tty_ldisc.c
54670@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54671 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54672 tty_ldiscs[disc] = new_ldisc;
54673 new_ldisc->num = disc;
54674- new_ldisc->refcount = 0;
54675+ atomic_set(&new_ldisc->refcount, 0);
54676 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54677
54678 return ret;
54679@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54680 return -EINVAL;
54681
54682 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54683- if (tty_ldiscs[disc]->refcount)
54684+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54685 ret = -EBUSY;
54686 else
54687 tty_ldiscs[disc] = NULL;
54688@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54689 if (ldops) {
54690 ret = ERR_PTR(-EAGAIN);
54691 if (try_module_get(ldops->owner)) {
54692- ldops->refcount++;
54693+ atomic_inc(&ldops->refcount);
54694 ret = ldops;
54695 }
54696 }
54697@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54698 unsigned long flags;
54699
54700 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54701- ldops->refcount--;
54702+ atomic_dec(&ldops->refcount);
54703 module_put(ldops->owner);
54704 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54705 }
54706diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54707index 1b93357..ea9f82c 100644
54708--- a/drivers/tty/tty_port.c
54709+++ b/drivers/tty/tty_port.c
54710@@ -237,7 +237,7 @@ void tty_port_hangup(struct tty_port *port)
54711 unsigned long flags;
54712
54713 spin_lock_irqsave(&port->lock, flags);
54714- port->count = 0;
54715+ atomic_set(&port->count, 0);
54716 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54717 tty = port->tty;
54718 if (tty)
54719@@ -399,7 +399,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54720
54721 /* The port lock protects the port counts */
54722 spin_lock_irqsave(&port->lock, flags);
54723- port->count--;
54724+ atomic_dec(&port->count);
54725 port->blocked_open++;
54726 spin_unlock_irqrestore(&port->lock, flags);
54727
54728@@ -441,7 +441,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54729 we must not mess that up further */
54730 spin_lock_irqsave(&port->lock, flags);
54731 if (!tty_hung_up_p(filp))
54732- port->count++;
54733+ atomic_inc(&port->count);
54734 port->blocked_open--;
54735 if (retval == 0)
54736 port->flags |= ASYNC_NORMAL_ACTIVE;
54737@@ -479,19 +479,19 @@ int tty_port_close_start(struct tty_port *port,
54738 return 0;
54739 }
54740
54741- if (tty->count == 1 && port->count != 1) {
54742+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54743 printk(KERN_WARNING
54744 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54745- port->count);
54746- port->count = 1;
54747+ atomic_read(&port->count));
54748+ atomic_set(&port->count, 1);
54749 }
54750- if (--port->count < 0) {
54751+ if (atomic_dec_return(&port->count) < 0) {
54752 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54753- port->count);
54754- port->count = 0;
54755+ atomic_read(&port->count));
54756+ atomic_set(&port->count, 0);
54757 }
54758
54759- if (port->count) {
54760+ if (atomic_read(&port->count)) {
54761 spin_unlock_irqrestore(&port->lock, flags);
54762 return 0;
54763 }
54764@@ -592,7 +592,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54765 struct file *filp)
54766 {
54767 spin_lock_irq(&port->lock);
54768- ++port->count;
54769+ atomic_inc(&port->count);
54770 spin_unlock_irq(&port->lock);
54771 tty_port_tty_set(port, tty);
54772
54773diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54774index d0e3a44..5f8b754 100644
54775--- a/drivers/tty/vt/keyboard.c
54776+++ b/drivers/tty/vt/keyboard.c
54777@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54778 kbd->kbdmode == VC_OFF) &&
54779 value != KVAL(K_SAK))
54780 return; /* SAK is allowed even in raw mode */
54781+
54782+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54783+ {
54784+ void *func = fn_handler[value];
54785+ if (func == fn_show_state || func == fn_show_ptregs ||
54786+ func == fn_show_mem)
54787+ return;
54788+ }
54789+#endif
54790+
54791 fn_handler[value](vc);
54792 }
54793
54794@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54795 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54796 return -EFAULT;
54797
54798- if (!capable(CAP_SYS_TTY_CONFIG))
54799- perm = 0;
54800-
54801 switch (cmd) {
54802 case KDGKBENT:
54803 /* Ensure another thread doesn't free it under us */
54804@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54805 spin_unlock_irqrestore(&kbd_event_lock, flags);
54806 return put_user(val, &user_kbe->kb_value);
54807 case KDSKBENT:
54808+ if (!capable(CAP_SYS_TTY_CONFIG))
54809+ perm = 0;
54810+
54811 if (!perm)
54812 return -EPERM;
54813 if (!i && v == K_NOSUCHMAP) {
54814@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54815 int i, j, k;
54816 int ret;
54817
54818- if (!capable(CAP_SYS_TTY_CONFIG))
54819- perm = 0;
54820-
54821 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54822 if (!kbs) {
54823 ret = -ENOMEM;
54824@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54825 kfree(kbs);
54826 return ((p && *p) ? -EOVERFLOW : 0);
54827 case KDSKBSENT:
54828+ if (!capable(CAP_SYS_TTY_CONFIG))
54829+ perm = 0;
54830+
54831 if (!perm) {
54832 ret = -EPERM;
54833 goto reterr;
54834diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54835index a673e5b..36e5d32 100644
54836--- a/drivers/uio/uio.c
54837+++ b/drivers/uio/uio.c
54838@@ -25,6 +25,7 @@
54839 #include <linux/kobject.h>
54840 #include <linux/cdev.h>
54841 #include <linux/uio_driver.h>
54842+#include <asm/local.h>
54843
54844 #define UIO_MAX_DEVICES (1U << MINORBITS)
54845
54846@@ -32,7 +33,7 @@ struct uio_device {
54847 struct module *owner;
54848 struct device *dev;
54849 int minor;
54850- atomic_t event;
54851+ atomic_unchecked_t event;
54852 struct fasync_struct *async_queue;
54853 wait_queue_head_t wait;
54854 struct uio_info *info;
54855@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
54856 struct device_attribute *attr, char *buf)
54857 {
54858 struct uio_device *idev = dev_get_drvdata(dev);
54859- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54860+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54861 }
54862 static DEVICE_ATTR_RO(event);
54863
54864@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
54865 {
54866 struct uio_device *idev = info->uio_dev;
54867
54868- atomic_inc(&idev->event);
54869+ atomic_inc_unchecked(&idev->event);
54870 wake_up_interruptible(&idev->wait);
54871 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54872 }
54873@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54874 }
54875
54876 listener->dev = idev;
54877- listener->event_count = atomic_read(&idev->event);
54878+ listener->event_count = atomic_read_unchecked(&idev->event);
54879 filep->private_data = listener;
54880
54881 if (idev->info->open) {
54882@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54883 return -EIO;
54884
54885 poll_wait(filep, &idev->wait, wait);
54886- if (listener->event_count != atomic_read(&idev->event))
54887+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54888 return POLLIN | POLLRDNORM;
54889 return 0;
54890 }
54891@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54892 do {
54893 set_current_state(TASK_INTERRUPTIBLE);
54894
54895- event_count = atomic_read(&idev->event);
54896+ event_count = atomic_read_unchecked(&idev->event);
54897 if (event_count != listener->event_count) {
54898 if (copy_to_user(buf, &event_count, count))
54899 retval = -EFAULT;
54900@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54901 static int uio_find_mem_index(struct vm_area_struct *vma)
54902 {
54903 struct uio_device *idev = vma->vm_private_data;
54904+ unsigned long size;
54905
54906 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54907- if (idev->info->mem[vma->vm_pgoff].size == 0)
54908+ size = idev->info->mem[vma->vm_pgoff].size;
54909+ if (size == 0)
54910+ return -1;
54911+ if (vma->vm_end - vma->vm_start > size)
54912 return -1;
54913 return (int)vma->vm_pgoff;
54914 }
54915@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
54916 idev->owner = owner;
54917 idev->info = info;
54918 init_waitqueue_head(&idev->wait);
54919- atomic_set(&idev->event, 0);
54920+ atomic_set_unchecked(&idev->event, 0);
54921
54922 ret = uio_get_minor(idev);
54923 if (ret)
54924diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54925index 813d4d3..a71934f 100644
54926--- a/drivers/usb/atm/cxacru.c
54927+++ b/drivers/usb/atm/cxacru.c
54928@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54929 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54930 if (ret < 2)
54931 return -EINVAL;
54932- if (index < 0 || index > 0x7f)
54933+ if (index > 0x7f)
54934 return -EINVAL;
54935 pos += tmp;
54936
54937diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54938index dada014..1d0d517 100644
54939--- a/drivers/usb/atm/usbatm.c
54940+++ b/drivers/usb/atm/usbatm.c
54941@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54942 if (printk_ratelimit())
54943 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54944 __func__, vpi, vci);
54945- atomic_inc(&vcc->stats->rx_err);
54946+ atomic_inc_unchecked(&vcc->stats->rx_err);
54947 return;
54948 }
54949
54950@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54951 if (length > ATM_MAX_AAL5_PDU) {
54952 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54953 __func__, length, vcc);
54954- atomic_inc(&vcc->stats->rx_err);
54955+ atomic_inc_unchecked(&vcc->stats->rx_err);
54956 goto out;
54957 }
54958
54959@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54960 if (sarb->len < pdu_length) {
54961 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54962 __func__, pdu_length, sarb->len, vcc);
54963- atomic_inc(&vcc->stats->rx_err);
54964+ atomic_inc_unchecked(&vcc->stats->rx_err);
54965 goto out;
54966 }
54967
54968 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54969 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54970 __func__, vcc);
54971- atomic_inc(&vcc->stats->rx_err);
54972+ atomic_inc_unchecked(&vcc->stats->rx_err);
54973 goto out;
54974 }
54975
54976@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54977 if (printk_ratelimit())
54978 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54979 __func__, length);
54980- atomic_inc(&vcc->stats->rx_drop);
54981+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54982 goto out;
54983 }
54984
54985@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54986
54987 vcc->push(vcc, skb);
54988
54989- atomic_inc(&vcc->stats->rx);
54990+ atomic_inc_unchecked(&vcc->stats->rx);
54991 out:
54992 skb_trim(sarb, 0);
54993 }
54994@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54995 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54996
54997 usbatm_pop(vcc, skb);
54998- atomic_inc(&vcc->stats->tx);
54999+ atomic_inc_unchecked(&vcc->stats->tx);
55000
55001 skb = skb_dequeue(&instance->sndqueue);
55002 }
55003@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55004 if (!left--)
55005 return sprintf(page,
55006 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55007- atomic_read(&atm_dev->stats.aal5.tx),
55008- atomic_read(&atm_dev->stats.aal5.tx_err),
55009- atomic_read(&atm_dev->stats.aal5.rx),
55010- atomic_read(&atm_dev->stats.aal5.rx_err),
55011- atomic_read(&atm_dev->stats.aal5.rx_drop));
55012+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55013+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55014+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55015+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55016+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55017
55018 if (!left--) {
55019 if (instance->disconnected)
55020diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55021index 2a3bbdf..91d72cf 100644
55022--- a/drivers/usb/core/devices.c
55023+++ b/drivers/usb/core/devices.c
55024@@ -126,7 +126,7 @@ static const char format_endpt[] =
55025 * time it gets called.
55026 */
55027 static struct device_connect_event {
55028- atomic_t count;
55029+ atomic_unchecked_t count;
55030 wait_queue_head_t wait;
55031 } device_event = {
55032 .count = ATOMIC_INIT(1),
55033@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55034
55035 void usbfs_conn_disc_event(void)
55036 {
55037- atomic_add(2, &device_event.count);
55038+ atomic_add_unchecked(2, &device_event.count);
55039 wake_up(&device_event.wait);
55040 }
55041
55042@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55043
55044 poll_wait(file, &device_event.wait, wait);
55045
55046- event_count = atomic_read(&device_event.count);
55047+ event_count = atomic_read_unchecked(&device_event.count);
55048 if (file->f_version != event_count) {
55049 file->f_version = event_count;
55050 return POLLIN | POLLRDNORM;
55051diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55052index 0b59731..46ee7d1 100644
55053--- a/drivers/usb/core/devio.c
55054+++ b/drivers/usb/core/devio.c
55055@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55056 struct usb_dev_state *ps = file->private_data;
55057 struct usb_device *dev = ps->dev;
55058 ssize_t ret = 0;
55059- unsigned len;
55060+ size_t len;
55061 loff_t pos;
55062 int i;
55063
55064@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55065 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55066 struct usb_config_descriptor *config =
55067 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55068- unsigned int length = le16_to_cpu(config->wTotalLength);
55069+ size_t length = le16_to_cpu(config->wTotalLength);
55070
55071 if (*ppos < pos + length) {
55072
55073 /* The descriptor may claim to be longer than it
55074 * really is. Here is the actual allocated length. */
55075- unsigned alloclen =
55076+ size_t alloclen =
55077 le16_to_cpu(dev->config[i].desc.wTotalLength);
55078
55079- len = length - (*ppos - pos);
55080+ len = length + pos - *ppos;
55081 if (len > nbytes)
55082 len = nbytes;
55083
55084 /* Simply don't write (skip over) unallocated parts */
55085 if (alloclen > (*ppos - pos)) {
55086- alloclen -= (*ppos - pos);
55087+ alloclen = alloclen + pos - *ppos;
55088 if (copy_to_user(buf,
55089 dev->rawdescriptors[i] + (*ppos - pos),
55090 min(len, alloclen))) {
55091diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55092index 487abcf..06226dc 100644
55093--- a/drivers/usb/core/hcd.c
55094+++ b/drivers/usb/core/hcd.c
55095@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55096 */
55097 usb_get_urb(urb);
55098 atomic_inc(&urb->use_count);
55099- atomic_inc(&urb->dev->urbnum);
55100+ atomic_inc_unchecked(&urb->dev->urbnum);
55101 usbmon_urb_submit(&hcd->self, urb);
55102
55103 /* NOTE requirements on root-hub callers (usbfs and the hub
55104@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55105 urb->hcpriv = NULL;
55106 INIT_LIST_HEAD(&urb->urb_list);
55107 atomic_dec(&urb->use_count);
55108- atomic_dec(&urb->dev->urbnum);
55109+ atomic_dec_unchecked(&urb->dev->urbnum);
55110 if (atomic_read(&urb->reject))
55111 wake_up(&usb_kill_urb_queue);
55112 usb_put_urb(urb);
55113diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55114index dc84915..cdb6624 100644
55115--- a/drivers/usb/core/hub.c
55116+++ b/drivers/usb/core/hub.c
55117@@ -27,6 +27,7 @@
55118 #include <linux/freezer.h>
55119 #include <linux/random.h>
55120 #include <linux/pm_qos.h>
55121+#include <linux/grsecurity.h>
55122
55123 #include <asm/uaccess.h>
55124 #include <asm/byteorder.h>
55125@@ -4662,6 +4663,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55126 goto done;
55127 return;
55128 }
55129+
55130+ if (gr_handle_new_usb())
55131+ goto done;
55132+
55133 if (hub_is_superspeed(hub->hdev))
55134 unit_load = 150;
55135 else
55136diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55137index 0c8a7fc..c45b40a 100644
55138--- a/drivers/usb/core/message.c
55139+++ b/drivers/usb/core/message.c
55140@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55141 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55142 * error number.
55143 */
55144-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55145+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55146 __u8 requesttype, __u16 value, __u16 index, void *data,
55147 __u16 size, int timeout)
55148 {
55149@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55150 * If successful, 0. Otherwise a negative error number. The number of actual
55151 * bytes transferred will be stored in the @actual_length parameter.
55152 */
55153-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55154+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55155 void *data, int len, int *actual_length, int timeout)
55156 {
55157 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55158@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55159 * bytes transferred will be stored in the @actual_length parameter.
55160 *
55161 */
55162-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55163+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55164 void *data, int len, int *actual_length, int timeout)
55165 {
55166 struct urb *urb;
55167diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55168index 1236c60..d47a51c 100644
55169--- a/drivers/usb/core/sysfs.c
55170+++ b/drivers/usb/core/sysfs.c
55171@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55172 struct usb_device *udev;
55173
55174 udev = to_usb_device(dev);
55175- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55176+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55177 }
55178 static DEVICE_ATTR_RO(urbnum);
55179
55180diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55181index 2dd2362..1135437 100644
55182--- a/drivers/usb/core/usb.c
55183+++ b/drivers/usb/core/usb.c
55184@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55185 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55186 dev->state = USB_STATE_ATTACHED;
55187 dev->lpm_disable_count = 1;
55188- atomic_set(&dev->urbnum, 0);
55189+ atomic_set_unchecked(&dev->urbnum, 0);
55190
55191 INIT_LIST_HEAD(&dev->ep0.urb_list);
55192 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55193diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55194index 490a6ca..1f8364d 100644
55195--- a/drivers/usb/dwc3/gadget.c
55196+++ b/drivers/usb/dwc3/gadget.c
55197@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55198 if (!usb_endpoint_xfer_isoc(desc))
55199 return 0;
55200
55201- memset(&trb_link, 0, sizeof(trb_link));
55202-
55203 /* Link TRB for ISOC. The HWO bit is never reset */
55204 trb_st_hw = &dep->trb_pool[0];
55205
55206diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55207index 8cfc319..4868255 100644
55208--- a/drivers/usb/early/ehci-dbgp.c
55209+++ b/drivers/usb/early/ehci-dbgp.c
55210@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55211
55212 #ifdef CONFIG_KGDB
55213 static struct kgdb_io kgdbdbgp_io_ops;
55214-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55215+static struct kgdb_io kgdbdbgp_io_ops_console;
55216+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55217 #else
55218 #define dbgp_kgdb_mode (0)
55219 #endif
55220@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55221 .write_char = kgdbdbgp_write_char,
55222 };
55223
55224+static struct kgdb_io kgdbdbgp_io_ops_console = {
55225+ .name = "kgdbdbgp",
55226+ .read_char = kgdbdbgp_read_char,
55227+ .write_char = kgdbdbgp_write_char,
55228+ .is_console = 1
55229+};
55230+
55231 static int kgdbdbgp_wait_time;
55232
55233 static int __init kgdbdbgp_parse_config(char *str)
55234@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55235 ptr++;
55236 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55237 }
55238- kgdb_register_io_module(&kgdbdbgp_io_ops);
55239- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55240+ if (early_dbgp_console.index != -1)
55241+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55242+ else
55243+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55244
55245 return 0;
55246 }
55247diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55248index 2b4c82d..06a8ee6 100644
55249--- a/drivers/usb/gadget/function/f_uac1.c
55250+++ b/drivers/usb/gadget/function/f_uac1.c
55251@@ -13,6 +13,7 @@
55252 #include <linux/kernel.h>
55253 #include <linux/device.h>
55254 #include <linux/atomic.h>
55255+#include <linux/module.h>
55256
55257 #include "u_uac1.h"
55258
55259diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55260index ad0aca8..8ff84865 100644
55261--- a/drivers/usb/gadget/function/u_serial.c
55262+++ b/drivers/usb/gadget/function/u_serial.c
55263@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55264 spin_lock_irq(&port->port_lock);
55265
55266 /* already open? Great. */
55267- if (port->port.count) {
55268+ if (atomic_read(&port->port.count)) {
55269 status = 0;
55270- port->port.count++;
55271+ atomic_inc(&port->port.count);
55272
55273 /* currently opening/closing? wait ... */
55274 } else if (port->openclose) {
55275@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55276 tty->driver_data = port;
55277 port->port.tty = tty;
55278
55279- port->port.count = 1;
55280+ atomic_set(&port->port.count, 1);
55281 port->openclose = false;
55282
55283 /* if connected, start the I/O stream */
55284@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55285
55286 spin_lock_irq(&port->port_lock);
55287
55288- if (port->port.count != 1) {
55289- if (port->port.count == 0)
55290+ if (atomic_read(&port->port.count) != 1) {
55291+ if (atomic_read(&port->port.count) == 0)
55292 WARN_ON(1);
55293 else
55294- --port->port.count;
55295+ atomic_dec(&port->port.count);
55296 goto exit;
55297 }
55298
55299@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55300 * and sleep if necessary
55301 */
55302 port->openclose = true;
55303- port->port.count = 0;
55304+ atomic_set(&port->port.count, 0);
55305
55306 gser = port->port_usb;
55307 if (gser && gser->disconnect)
55308@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55309 int cond;
55310
55311 spin_lock_irq(&port->port_lock);
55312- cond = (port->port.count == 0) && !port->openclose;
55313+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55314 spin_unlock_irq(&port->port_lock);
55315 return cond;
55316 }
55317@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55318 /* if it's already open, start I/O ... and notify the serial
55319 * protocol about open/close status (connect/disconnect).
55320 */
55321- if (port->port.count) {
55322+ if (atomic_read(&port->port.count)) {
55323 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55324 gs_start_io(port);
55325 if (gser->connect)
55326@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55327
55328 port->port_usb = NULL;
55329 gser->ioport = NULL;
55330- if (port->port.count > 0 || port->openclose) {
55331+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55332 wake_up_interruptible(&port->drain_wait);
55333 if (port->port.tty)
55334 tty_hangup(port->port.tty);
55335@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55336
55337 /* finally, free any unused/unusable I/O buffers */
55338 spin_lock_irqsave(&port->port_lock, flags);
55339- if (port->port.count == 0 && !port->openclose)
55340+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55341 gs_buf_free(&port->port_write_buf);
55342 gs_free_requests(gser->out, &port->read_pool, NULL);
55343 gs_free_requests(gser->out, &port->read_queue, NULL);
55344diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55345index 7a55fea..cc0ed4f 100644
55346--- a/drivers/usb/gadget/function/u_uac1.c
55347+++ b/drivers/usb/gadget/function/u_uac1.c
55348@@ -16,6 +16,7 @@
55349 #include <linux/ctype.h>
55350 #include <linux/random.h>
55351 #include <linux/syscalls.h>
55352+#include <linux/module.h>
55353
55354 #include "u_uac1.h"
55355
55356diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55357index 6130b75..3b60008 100644
55358--- a/drivers/usb/host/ehci-hub.c
55359+++ b/drivers/usb/host/ehci-hub.c
55360@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55361 urb->transfer_flags = URB_DIR_IN;
55362 usb_get_urb(urb);
55363 atomic_inc(&urb->use_count);
55364- atomic_inc(&urb->dev->urbnum);
55365+ atomic_inc_unchecked(&urb->dev->urbnum);
55366 urb->setup_dma = dma_map_single(
55367 hcd->self.controller,
55368 urb->setup_packet,
55369@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55370 urb->status = -EINPROGRESS;
55371 usb_get_urb(urb);
55372 atomic_inc(&urb->use_count);
55373- atomic_inc(&urb->dev->urbnum);
55374+ atomic_inc_unchecked(&urb->dev->urbnum);
55375 retval = submit_single_step_set_feature(hcd, urb, 0);
55376 if (!retval && !wait_for_completion_timeout(&done,
55377 msecs_to_jiffies(2000))) {
55378diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55379index d0d8fad..668ef7b 100644
55380--- a/drivers/usb/host/hwa-hc.c
55381+++ b/drivers/usb/host/hwa-hc.c
55382@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55383 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55384 struct wahc *wa = &hwahc->wa;
55385 struct device *dev = &wa->usb_iface->dev;
55386- u8 mas_le[UWB_NUM_MAS/8];
55387+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55388+
55389+ if (mas_le == NULL)
55390+ return -ENOMEM;
55391
55392 /* Set the stream index */
55393 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55394@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55395 WUSB_REQ_SET_WUSB_MAS,
55396 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55397 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55398- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55399+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55400 if (result < 0)
55401 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55402 out:
55403+ kfree(mas_le);
55404+
55405 return result;
55406 }
55407
55408diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55409index b3d245e..99549ed 100644
55410--- a/drivers/usb/misc/appledisplay.c
55411+++ b/drivers/usb/misc/appledisplay.c
55412@@ -84,7 +84,7 @@ struct appledisplay {
55413 struct mutex sysfslock; /* concurrent read and write */
55414 };
55415
55416-static atomic_t count_displays = ATOMIC_INIT(0);
55417+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55418 static struct workqueue_struct *wq;
55419
55420 static void appledisplay_complete(struct urb *urb)
55421@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55422
55423 /* Register backlight device */
55424 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55425- atomic_inc_return(&count_displays) - 1);
55426+ atomic_inc_return_unchecked(&count_displays) - 1);
55427 memset(&props, 0, sizeof(struct backlight_properties));
55428 props.type = BACKLIGHT_RAW;
55429 props.max_brightness = 0xff;
55430diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55431index 8d7fc48..01c4986 100644
55432--- a/drivers/usb/serial/console.c
55433+++ b/drivers/usb/serial/console.c
55434@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55435
55436 info->port = port;
55437
55438- ++port->port.count;
55439+ atomic_inc(&port->port.count);
55440 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55441 if (serial->type->set_termios) {
55442 /*
55443@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55444 }
55445 /* Now that any required fake tty operations are completed restore
55446 * the tty port count */
55447- --port->port.count;
55448+ atomic_dec(&port->port.count);
55449 /* The console is special in terms of closing the device so
55450 * indicate this port is now acting as a system console. */
55451 port->port.console = 1;
55452@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55453 free_tty:
55454 kfree(tty);
55455 reset_open_count:
55456- port->port.count = 0;
55457+ atomic_set(&port->port.count, 0);
55458 usb_autopm_put_interface(serial->interface);
55459 error_get_interface:
55460 usb_serial_put(serial);
55461@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55462 static void usb_console_write(struct console *co,
55463 const char *buf, unsigned count)
55464 {
55465- static struct usbcons_info *info = &usbcons_info;
55466+ struct usbcons_info *info = &usbcons_info;
55467 struct usb_serial_port *port = info->port;
55468 struct usb_serial *serial;
55469 int retval = -ENODEV;
55470diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55471index 307e339..6aa97cb 100644
55472--- a/drivers/usb/storage/usb.h
55473+++ b/drivers/usb/storage/usb.h
55474@@ -63,7 +63,7 @@ struct us_unusual_dev {
55475 __u8 useProtocol;
55476 __u8 useTransport;
55477 int (*initFunction)(struct us_data *);
55478-};
55479+} __do_const;
55480
55481
55482 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55483diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55484index a863a98..d272795 100644
55485--- a/drivers/usb/usbip/vhci.h
55486+++ b/drivers/usb/usbip/vhci.h
55487@@ -83,7 +83,7 @@ struct vhci_hcd {
55488 unsigned resuming:1;
55489 unsigned long re_timeout;
55490
55491- atomic_t seqnum;
55492+ atomic_unchecked_t seqnum;
55493
55494 /*
55495 * NOTE:
55496diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55497index c02374b..32d47a9 100644
55498--- a/drivers/usb/usbip/vhci_hcd.c
55499+++ b/drivers/usb/usbip/vhci_hcd.c
55500@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
55501
55502 spin_lock(&vdev->priv_lock);
55503
55504- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55505+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55506 if (priv->seqnum == 0xffff)
55507 dev_info(&urb->dev->dev, "seqnum max\n");
55508
55509@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55510 return -ENOMEM;
55511 }
55512
55513- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55514+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55515 if (unlink->seqnum == 0xffff)
55516 pr_info("seqnum max\n");
55517
55518@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
55519 vdev->rhport = rhport;
55520 }
55521
55522- atomic_set(&vhci->seqnum, 0);
55523+ atomic_set_unchecked(&vhci->seqnum, 0);
55524 spin_lock_init(&vhci->lock);
55525
55526 hcd->power_budget = 0; /* no limit */
55527diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55528index 00e4a54..d676f85 100644
55529--- a/drivers/usb/usbip/vhci_rx.c
55530+++ b/drivers/usb/usbip/vhci_rx.c
55531@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55532 if (!urb) {
55533 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55534 pr_info("max seqnum %d\n",
55535- atomic_read(&the_controller->seqnum));
55536+ atomic_read_unchecked(&the_controller->seqnum));
55537 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55538 return;
55539 }
55540diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55541index f2a8d29..7bc3fe7 100644
55542--- a/drivers/usb/wusbcore/wa-hc.h
55543+++ b/drivers/usb/wusbcore/wa-hc.h
55544@@ -240,7 +240,7 @@ struct wahc {
55545 spinlock_t xfer_list_lock;
55546 struct work_struct xfer_enqueue_work;
55547 struct work_struct xfer_error_work;
55548- atomic_t xfer_id_count;
55549+ atomic_unchecked_t xfer_id_count;
55550
55551 kernel_ulong_t quirks;
55552 };
55553@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55554 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55555 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55556 wa->dto_in_use = 0;
55557- atomic_set(&wa->xfer_id_count, 1);
55558+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55559 /* init the buf in URBs */
55560 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55561 usb_init_urb(&(wa->buf_in_urbs[index]));
55562diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55563index e279015..c2d0dae 100644
55564--- a/drivers/usb/wusbcore/wa-xfer.c
55565+++ b/drivers/usb/wusbcore/wa-xfer.c
55566@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55567 */
55568 static void wa_xfer_id_init(struct wa_xfer *xfer)
55569 {
55570- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55571+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55572 }
55573
55574 /* Return the xfer's ID. */
55575diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55576index f018d8d..ccab63f 100644
55577--- a/drivers/vfio/vfio.c
55578+++ b/drivers/vfio/vfio.c
55579@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55580 return 0;
55581
55582 /* TODO Prevent device auto probing */
55583- WARN("Device %s added to live group %d!\n", dev_name(dev),
55584+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55585 iommu_group_id(group->iommu_group));
55586
55587 return 0;
55588diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55589index 5174eba..451e6bc 100644
55590--- a/drivers/vhost/vringh.c
55591+++ b/drivers/vhost/vringh.c
55592@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55593 /* Userspace access helpers: in this case, addresses are really userspace. */
55594 static inline int getu16_user(u16 *val, const u16 *p)
55595 {
55596- return get_user(*val, (__force u16 __user *)p);
55597+ return get_user(*val, (u16 __force_user *)p);
55598 }
55599
55600 static inline int putu16_user(u16 *p, u16 val)
55601 {
55602- return put_user(val, (__force u16 __user *)p);
55603+ return put_user(val, (u16 __force_user *)p);
55604 }
55605
55606 static inline int copydesc_user(void *dst, const void *src, size_t len)
55607 {
55608- return copy_from_user(dst, (__force void __user *)src, len) ?
55609+ return copy_from_user(dst, (void __force_user *)src, len) ?
55610 -EFAULT : 0;
55611 }
55612
55613@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55614 const struct vring_used_elem *src,
55615 unsigned int num)
55616 {
55617- return copy_to_user((__force void __user *)dst, src,
55618+ return copy_to_user((void __force_user *)dst, src,
55619 sizeof(*dst) * num) ? -EFAULT : 0;
55620 }
55621
55622 static inline int xfer_from_user(void *src, void *dst, size_t len)
55623 {
55624- return copy_from_user(dst, (__force void __user *)src, len) ?
55625+ return copy_from_user(dst, (void __force_user *)src, len) ?
55626 -EFAULT : 0;
55627 }
55628
55629 static inline int xfer_to_user(void *dst, void *src, size_t len)
55630 {
55631- return copy_to_user((__force void __user *)dst, src, len) ?
55632+ return copy_to_user((void __force_user *)dst, src, len) ?
55633 -EFAULT : 0;
55634 }
55635
55636@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
55637 vrh->last_used_idx = 0;
55638 vrh->vring.num = num;
55639 /* vring expects kernel addresses, but only used via accessors. */
55640- vrh->vring.desc = (__force struct vring_desc *)desc;
55641- vrh->vring.avail = (__force struct vring_avail *)avail;
55642- vrh->vring.used = (__force struct vring_used *)used;
55643+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55644+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55645+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55646 return 0;
55647 }
55648 EXPORT_SYMBOL(vringh_init_user);
55649@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
55650
55651 static inline int putu16_kern(u16 *p, u16 val)
55652 {
55653- ACCESS_ONCE(*p) = val;
55654+ ACCESS_ONCE_RW(*p) = val;
55655 return 0;
55656 }
55657
55658diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55659index 84a110a..96312c3 100644
55660--- a/drivers/video/backlight/kb3886_bl.c
55661+++ b/drivers/video/backlight/kb3886_bl.c
55662@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55663 static unsigned long kb3886bl_flags;
55664 #define KB3886BL_SUSPENDED 0x01
55665
55666-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55667+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55668 {
55669 .ident = "Sahara Touch-iT",
55670 .matches = {
55671diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55672index 1b0b233..6f34c2c 100644
55673--- a/drivers/video/fbdev/arcfb.c
55674+++ b/drivers/video/fbdev/arcfb.c
55675@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55676 return -ENOSPC;
55677
55678 err = 0;
55679- if ((count + p) > fbmemlength) {
55680+ if (count > (fbmemlength - p)) {
55681 count = fbmemlength - p;
55682 err = -ENOSPC;
55683 }
55684diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55685index ff60701..814b973 100644
55686--- a/drivers/video/fbdev/aty/aty128fb.c
55687+++ b/drivers/video/fbdev/aty/aty128fb.c
55688@@ -149,7 +149,7 @@ enum {
55689 };
55690
55691 /* Must match above enum */
55692-static char * const r128_family[] = {
55693+static const char * const r128_family[] = {
55694 "AGP",
55695 "PCI",
55696 "PRO AGP",
55697diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55698index 37ec09b..98f8862 100644
55699--- a/drivers/video/fbdev/aty/atyfb_base.c
55700+++ b/drivers/video/fbdev/aty/atyfb_base.c
55701@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55702 par->accel_flags = var->accel_flags; /* hack */
55703
55704 if (var->accel_flags) {
55705- info->fbops->fb_sync = atyfb_sync;
55706+ pax_open_kernel();
55707+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55708+ pax_close_kernel();
55709 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55710 } else {
55711- info->fbops->fb_sync = NULL;
55712+ pax_open_kernel();
55713+ *(void **)&info->fbops->fb_sync = NULL;
55714+ pax_close_kernel();
55715 info->flags |= FBINFO_HWACCEL_DISABLED;
55716 }
55717
55718diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55719index 2fa0317..4983f2a 100644
55720--- a/drivers/video/fbdev/aty/mach64_cursor.c
55721+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55722@@ -8,6 +8,7 @@
55723 #include "../core/fb_draw.h"
55724
55725 #include <asm/io.h>
55726+#include <asm/pgtable.h>
55727
55728 #ifdef __sparc__
55729 #include <asm/fbio.h>
55730@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55731 info->sprite.buf_align = 16; /* and 64 lines tall. */
55732 info->sprite.flags = FB_PIXMAP_IO;
55733
55734- info->fbops->fb_cursor = atyfb_cursor;
55735+ pax_open_kernel();
55736+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55737+ pax_close_kernel();
55738
55739 return 0;
55740 }
55741diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55742index 900aa4e..6d49418 100644
55743--- a/drivers/video/fbdev/core/fb_defio.c
55744+++ b/drivers/video/fbdev/core/fb_defio.c
55745@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
55746
55747 BUG_ON(!fbdefio);
55748 mutex_init(&fbdefio->lock);
55749- info->fbops->fb_mmap = fb_deferred_io_mmap;
55750+ pax_open_kernel();
55751+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55752+ pax_close_kernel();
55753 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55754 INIT_LIST_HEAD(&fbdefio->pagelist);
55755 if (fbdefio->delay == 0) /* set a default of 1 s */
55756@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55757 page->mapping = NULL;
55758 }
55759
55760- info->fbops->fb_mmap = NULL;
55761+ *(void **)&info->fbops->fb_mmap = NULL;
55762 mutex_destroy(&fbdefio->lock);
55763 }
55764 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55765diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55766index b5e85f6..290f8c7 100644
55767--- a/drivers/video/fbdev/core/fbmem.c
55768+++ b/drivers/video/fbdev/core/fbmem.c
55769@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55770 __u32 data;
55771 int err;
55772
55773- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55774+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55775
55776 data = (__u32) (unsigned long) fix->smem_start;
55777 err |= put_user(data, &fix32->smem_start);
55778diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55779index 4254336..282567e 100644
55780--- a/drivers/video/fbdev/hyperv_fb.c
55781+++ b/drivers/video/fbdev/hyperv_fb.c
55782@@ -240,7 +240,7 @@ static uint screen_fb_size;
55783 static inline int synthvid_send(struct hv_device *hdev,
55784 struct synthvid_msg *msg)
55785 {
55786- static atomic64_t request_id = ATOMIC64_INIT(0);
55787+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55788 int ret;
55789
55790 msg->pipe_hdr.type = PIPE_MSG_DATA;
55791@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55792
55793 ret = vmbus_sendpacket(hdev->channel, msg,
55794 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55795- atomic64_inc_return(&request_id),
55796+ atomic64_inc_return_unchecked(&request_id),
55797 VM_PKT_DATA_INBAND, 0);
55798
55799 if (ret)
55800diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55801index 7672d2e..b56437f 100644
55802--- a/drivers/video/fbdev/i810/i810_accel.c
55803+++ b/drivers/video/fbdev/i810/i810_accel.c
55804@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55805 }
55806 }
55807 printk("ringbuffer lockup!!!\n");
55808+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55809 i810_report_error(mmio);
55810 par->dev_flags |= LOCKUP;
55811 info->pixmap.scan_align = 1;
55812diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55813index a01147f..5d896f8 100644
55814--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55815+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55816@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55817
55818 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55819 struct matrox_switch matrox_mystique = {
55820- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55821+ .preinit = MGA1064_preinit,
55822+ .reset = MGA1064_reset,
55823+ .init = MGA1064_init,
55824+ .restore = MGA1064_restore,
55825 };
55826 EXPORT_SYMBOL(matrox_mystique);
55827 #endif
55828
55829 #ifdef CONFIG_FB_MATROX_G
55830 struct matrox_switch matrox_G100 = {
55831- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55832+ .preinit = MGAG100_preinit,
55833+ .reset = MGAG100_reset,
55834+ .init = MGAG100_init,
55835+ .restore = MGAG100_restore,
55836 };
55837 EXPORT_SYMBOL(matrox_G100);
55838 #endif
55839diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55840index 195ad7c..09743fc 100644
55841--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55842+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55843@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55844 }
55845
55846 struct matrox_switch matrox_millennium = {
55847- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55848+ .preinit = Ti3026_preinit,
55849+ .reset = Ti3026_reset,
55850+ .init = Ti3026_init,
55851+ .restore = Ti3026_restore
55852 };
55853 EXPORT_SYMBOL(matrox_millennium);
55854 #endif
55855diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55856index fe92eed..106e085 100644
55857--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55858+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55859@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55860 struct mb862xxfb_par *par = info->par;
55861
55862 if (info->var.bits_per_pixel == 32) {
55863- info->fbops->fb_fillrect = cfb_fillrect;
55864- info->fbops->fb_copyarea = cfb_copyarea;
55865- info->fbops->fb_imageblit = cfb_imageblit;
55866+ pax_open_kernel();
55867+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55868+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55869+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55870+ pax_close_kernel();
55871 } else {
55872 outreg(disp, GC_L0EM, 3);
55873- info->fbops->fb_fillrect = mb86290fb_fillrect;
55874- info->fbops->fb_copyarea = mb86290fb_copyarea;
55875- info->fbops->fb_imageblit = mb86290fb_imageblit;
55876+ pax_open_kernel();
55877+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55878+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55879+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55880+ pax_close_kernel();
55881 }
55882 outreg(draw, GDC_REG_DRAW_BASE, 0);
55883 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55884diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55885index def0412..fed6529 100644
55886--- a/drivers/video/fbdev/nvidia/nvidia.c
55887+++ b/drivers/video/fbdev/nvidia/nvidia.c
55888@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55889 info->fix.line_length = (info->var.xres_virtual *
55890 info->var.bits_per_pixel) >> 3;
55891 if (info->var.accel_flags) {
55892- info->fbops->fb_imageblit = nvidiafb_imageblit;
55893- info->fbops->fb_fillrect = nvidiafb_fillrect;
55894- info->fbops->fb_copyarea = nvidiafb_copyarea;
55895- info->fbops->fb_sync = nvidiafb_sync;
55896+ pax_open_kernel();
55897+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55898+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55899+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55900+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55901+ pax_close_kernel();
55902 info->pixmap.scan_align = 4;
55903 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55904 info->flags |= FBINFO_READS_FAST;
55905 NVResetGraphics(info);
55906 } else {
55907- info->fbops->fb_imageblit = cfb_imageblit;
55908- info->fbops->fb_fillrect = cfb_fillrect;
55909- info->fbops->fb_copyarea = cfb_copyarea;
55910- info->fbops->fb_sync = NULL;
55911+ pax_open_kernel();
55912+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55913+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55914+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55915+ *(void **)&info->fbops->fb_sync = NULL;
55916+ pax_close_kernel();
55917 info->pixmap.scan_align = 1;
55918 info->flags |= FBINFO_HWACCEL_DISABLED;
55919 info->flags &= ~FBINFO_READS_FAST;
55920@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55921 info->pixmap.size = 8 * 1024;
55922 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55923
55924- if (!hwcur)
55925- info->fbops->fb_cursor = NULL;
55926+ if (!hwcur) {
55927+ pax_open_kernel();
55928+ *(void **)&info->fbops->fb_cursor = NULL;
55929+ pax_close_kernel();
55930+ }
55931
55932 info->var.accel_flags = (!noaccel);
55933
55934diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55935index 2412a0d..294215b 100644
55936--- a/drivers/video/fbdev/omap2/dss/display.c
55937+++ b/drivers/video/fbdev/omap2/dss/display.c
55938@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55939 if (dssdev->name == NULL)
55940 dssdev->name = dssdev->alias;
55941
55942+ pax_open_kernel();
55943 if (drv && drv->get_resolution == NULL)
55944- drv->get_resolution = omapdss_default_get_resolution;
55945+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55946 if (drv && drv->get_recommended_bpp == NULL)
55947- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55948+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55949 if (drv && drv->get_timings == NULL)
55950- drv->get_timings = omapdss_default_get_timings;
55951+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55952+ pax_close_kernel();
55953
55954 mutex_lock(&panel_list_mutex);
55955 list_add_tail(&dssdev->panel_list, &panel_list);
55956diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55957index 83433cb..71e9b98 100644
55958--- a/drivers/video/fbdev/s1d13xxxfb.c
55959+++ b/drivers/video/fbdev/s1d13xxxfb.c
55960@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55961
55962 switch(prod_id) {
55963 case S1D13506_PROD_ID: /* activate acceleration */
55964- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55965- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55966+ pax_open_kernel();
55967+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55968+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55969+ pax_close_kernel();
55970 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55971 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55972 break;
55973diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55974index 2bcc84a..29dd1ea 100644
55975--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55976+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55977@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55978 }
55979
55980 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55981- lcdc_sys_write_index,
55982- lcdc_sys_write_data,
55983- lcdc_sys_read_data,
55984+ .write_index = lcdc_sys_write_index,
55985+ .write_data = lcdc_sys_write_data,
55986+ .read_data = lcdc_sys_read_data,
55987 };
55988
55989 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55990diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55991index d513ed6..90b0de9 100644
55992--- a/drivers/video/fbdev/smscufx.c
55993+++ b/drivers/video/fbdev/smscufx.c
55994@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55995 fb_deferred_io_cleanup(info);
55996 kfree(info->fbdefio);
55997 info->fbdefio = NULL;
55998- info->fbops->fb_mmap = ufx_ops_mmap;
55999+ pax_open_kernel();
56000+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56001+ pax_close_kernel();
56002 }
56003
56004 pr_debug("released /dev/fb%d user=%d count=%d",
56005diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56006index 77b890e..458e666 100644
56007--- a/drivers/video/fbdev/udlfb.c
56008+++ b/drivers/video/fbdev/udlfb.c
56009@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56010 dlfb_urb_completion(urb);
56011
56012 error:
56013- atomic_add(bytes_sent, &dev->bytes_sent);
56014- atomic_add(bytes_identical, &dev->bytes_identical);
56015- atomic_add(width*height*2, &dev->bytes_rendered);
56016+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56017+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56018+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56019 end_cycles = get_cycles();
56020- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56021+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56022 >> 10)), /* Kcycles */
56023 &dev->cpu_kcycles_used);
56024
56025@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56026 dlfb_urb_completion(urb);
56027
56028 error:
56029- atomic_add(bytes_sent, &dev->bytes_sent);
56030- atomic_add(bytes_identical, &dev->bytes_identical);
56031- atomic_add(bytes_rendered, &dev->bytes_rendered);
56032+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56033+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56034+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56035 end_cycles = get_cycles();
56036- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56037+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56038 >> 10)), /* Kcycles */
56039 &dev->cpu_kcycles_used);
56040 }
56041@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56042 fb_deferred_io_cleanup(info);
56043 kfree(info->fbdefio);
56044 info->fbdefio = NULL;
56045- info->fbops->fb_mmap = dlfb_ops_mmap;
56046+ pax_open_kernel();
56047+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56048+ pax_close_kernel();
56049 }
56050
56051 pr_warn("released /dev/fb%d user=%d count=%d\n",
56052@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56053 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56054 struct dlfb_data *dev = fb_info->par;
56055 return snprintf(buf, PAGE_SIZE, "%u\n",
56056- atomic_read(&dev->bytes_rendered));
56057+ atomic_read_unchecked(&dev->bytes_rendered));
56058 }
56059
56060 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56061@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56062 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56063 struct dlfb_data *dev = fb_info->par;
56064 return snprintf(buf, PAGE_SIZE, "%u\n",
56065- atomic_read(&dev->bytes_identical));
56066+ atomic_read_unchecked(&dev->bytes_identical));
56067 }
56068
56069 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56070@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56071 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56072 struct dlfb_data *dev = fb_info->par;
56073 return snprintf(buf, PAGE_SIZE, "%u\n",
56074- atomic_read(&dev->bytes_sent));
56075+ atomic_read_unchecked(&dev->bytes_sent));
56076 }
56077
56078 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56079@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56080 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56081 struct dlfb_data *dev = fb_info->par;
56082 return snprintf(buf, PAGE_SIZE, "%u\n",
56083- atomic_read(&dev->cpu_kcycles_used));
56084+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56085 }
56086
56087 static ssize_t edid_show(
56088@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56089 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56090 struct dlfb_data *dev = fb_info->par;
56091
56092- atomic_set(&dev->bytes_rendered, 0);
56093- atomic_set(&dev->bytes_identical, 0);
56094- atomic_set(&dev->bytes_sent, 0);
56095- atomic_set(&dev->cpu_kcycles_used, 0);
56096+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56097+ atomic_set_unchecked(&dev->bytes_identical, 0);
56098+ atomic_set_unchecked(&dev->bytes_sent, 0);
56099+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56100
56101 return count;
56102 }
56103diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56104index 509d452..7c9d2de 100644
56105--- a/drivers/video/fbdev/uvesafb.c
56106+++ b/drivers/video/fbdev/uvesafb.c
56107@@ -19,6 +19,7 @@
56108 #include <linux/io.h>
56109 #include <linux/mutex.h>
56110 #include <linux/slab.h>
56111+#include <linux/moduleloader.h>
56112 #include <video/edid.h>
56113 #include <video/uvesafb.h>
56114 #ifdef CONFIG_X86
56115@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56116 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56117 par->pmi_setpal = par->ypan = 0;
56118 } else {
56119+
56120+#ifdef CONFIG_PAX_KERNEXEC
56121+#ifdef CONFIG_MODULES
56122+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56123+#endif
56124+ if (!par->pmi_code) {
56125+ par->pmi_setpal = par->ypan = 0;
56126+ return 0;
56127+ }
56128+#endif
56129+
56130 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56131 + task->t.regs.edi);
56132+
56133+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56134+ pax_open_kernel();
56135+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56136+ pax_close_kernel();
56137+
56138+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56139+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56140+#else
56141 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56142 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56143+#endif
56144+
56145 printk(KERN_INFO "uvesafb: protected mode interface info at "
56146 "%04x:%04x\n",
56147 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56148@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56149 par->ypan = ypan;
56150
56151 if (par->pmi_setpal || par->ypan) {
56152+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56153 if (__supported_pte_mask & _PAGE_NX) {
56154 par->pmi_setpal = par->ypan = 0;
56155 printk(KERN_WARNING "uvesafb: NX protection is active, "
56156 "better not use the PMI.\n");
56157- } else {
56158+ } else
56159+#endif
56160 uvesafb_vbe_getpmi(task, par);
56161- }
56162 }
56163 #else
56164 /* The protected mode interface is not available on non-x86. */
56165@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56166 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56167
56168 /* Disable blanking if the user requested so. */
56169- if (!blank)
56170- info->fbops->fb_blank = NULL;
56171+ if (!blank) {
56172+ pax_open_kernel();
56173+ *(void **)&info->fbops->fb_blank = NULL;
56174+ pax_close_kernel();
56175+ }
56176
56177 /*
56178 * Find out how much IO memory is required for the mode with
56179@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56180 info->flags = FBINFO_FLAG_DEFAULT |
56181 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56182
56183- if (!par->ypan)
56184- info->fbops->fb_pan_display = NULL;
56185+ if (!par->ypan) {
56186+ pax_open_kernel();
56187+ *(void **)&info->fbops->fb_pan_display = NULL;
56188+ pax_close_kernel();
56189+ }
56190 }
56191
56192 static void uvesafb_init_mtrr(struct fb_info *info)
56193@@ -1787,6 +1817,11 @@ out_mode:
56194 out:
56195 kfree(par->vbe_modes);
56196
56197+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56198+ if (par->pmi_code)
56199+ module_free_exec(NULL, par->pmi_code);
56200+#endif
56201+
56202 framebuffer_release(info);
56203 return err;
56204 }
56205@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56206 kfree(par->vbe_state_orig);
56207 kfree(par->vbe_state_saved);
56208
56209+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56210+ if (par->pmi_code)
56211+ module_free_exec(NULL, par->pmi_code);
56212+#endif
56213+
56214 framebuffer_release(info);
56215 }
56216 return 0;
56217diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56218index 6170e7f..dd63031 100644
56219--- a/drivers/video/fbdev/vesafb.c
56220+++ b/drivers/video/fbdev/vesafb.c
56221@@ -9,6 +9,7 @@
56222 */
56223
56224 #include <linux/module.h>
56225+#include <linux/moduleloader.h>
56226 #include <linux/kernel.h>
56227 #include <linux/errno.h>
56228 #include <linux/string.h>
56229@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56230 static int vram_total; /* Set total amount of memory */
56231 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56232 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56233-static void (*pmi_start)(void) __read_mostly;
56234-static void (*pmi_pal) (void) __read_mostly;
56235+static void (*pmi_start)(void) __read_only;
56236+static void (*pmi_pal) (void) __read_only;
56237 static int depth __read_mostly;
56238 static int vga_compat __read_mostly;
56239 /* --------------------------------------------------------------------- */
56240@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56241 unsigned int size_remap;
56242 unsigned int size_total;
56243 char *option = NULL;
56244+ void *pmi_code = NULL;
56245
56246 /* ignore error return of fb_get_options */
56247 fb_get_options("vesafb", &option);
56248@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56249 size_remap = size_total;
56250 vesafb_fix.smem_len = size_remap;
56251
56252-#ifndef __i386__
56253- screen_info.vesapm_seg = 0;
56254-#endif
56255-
56256 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56257 printk(KERN_WARNING
56258 "vesafb: cannot reserve video memory at 0x%lx\n",
56259@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56260 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56261 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56262
56263+#ifdef __i386__
56264+
56265+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56266+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56267+ if (!pmi_code)
56268+#elif !defined(CONFIG_PAX_KERNEXEC)
56269+ if (0)
56270+#endif
56271+
56272+#endif
56273+ screen_info.vesapm_seg = 0;
56274+
56275 if (screen_info.vesapm_seg) {
56276- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56277- screen_info.vesapm_seg,screen_info.vesapm_off);
56278+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56279+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56280 }
56281
56282 if (screen_info.vesapm_seg < 0xc000)
56283@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56284
56285 if (ypan || pmi_setpal) {
56286 unsigned short *pmi_base;
56287+
56288 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56289- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56290- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56291+
56292+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56293+ pax_open_kernel();
56294+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56295+#else
56296+ pmi_code = pmi_base;
56297+#endif
56298+
56299+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56300+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56301+
56302+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56303+ pmi_start = ktva_ktla(pmi_start);
56304+ pmi_pal = ktva_ktla(pmi_pal);
56305+ pax_close_kernel();
56306+#endif
56307+
56308 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56309 if (pmi_base[3]) {
56310 printk(KERN_INFO "vesafb: pmi: ports = ");
56311@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56312 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56313 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56314
56315- if (!ypan)
56316- info->fbops->fb_pan_display = NULL;
56317+ if (!ypan) {
56318+ pax_open_kernel();
56319+ *(void **)&info->fbops->fb_pan_display = NULL;
56320+ pax_close_kernel();
56321+ }
56322
56323 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56324 err = -ENOMEM;
56325@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56326 fb_info(info, "%s frame buffer device\n", info->fix.id);
56327 return 0;
56328 err:
56329+
56330+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56331+ module_free_exec(NULL, pmi_code);
56332+#endif
56333+
56334 if (info->screen_base)
56335 iounmap(info->screen_base);
56336 framebuffer_release(info);
56337diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56338index 88714ae..16c2e11 100644
56339--- a/drivers/video/fbdev/via/via_clock.h
56340+++ b/drivers/video/fbdev/via/via_clock.h
56341@@ -56,7 +56,7 @@ struct via_clock {
56342
56343 void (*set_engine_pll_state)(u8 state);
56344 void (*set_engine_pll)(struct via_pll_config config);
56345-};
56346+} __no_const;
56347
56348
56349 static inline u32 get_pll_internal_frequency(u32 ref_freq,
56350diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56351index 3c14e43..2630570 100644
56352--- a/drivers/video/logo/logo_linux_clut224.ppm
56353+++ b/drivers/video/logo/logo_linux_clut224.ppm
56354@@ -2,1603 +2,1123 @@ P3
56355 # Standard 224-color Linux logo
56356 80 80
56357 255
56358- 0 0 0 0 0 0 0 0 0 0 0 0
56359- 0 0 0 0 0 0 0 0 0 0 0 0
56360- 0 0 0 0 0 0 0 0 0 0 0 0
56361- 0 0 0 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 6 6 6 6 6 6 10 10 10 10 10 10
56368- 10 10 10 6 6 6 6 6 6 6 6 6
56369- 0 0 0 0 0 0 0 0 0 0 0 0
56370- 0 0 0 0 0 0 0 0 0 0 0 0
56371- 0 0 0 0 0 0 0 0 0 0 0 0
56372- 0 0 0 0 0 0 0 0 0 0 0 0
56373- 0 0 0 0 0 0 0 0 0 0 0 0
56374- 0 0 0 0 0 0 0 0 0 0 0 0
56375- 0 0 0 0 0 0 0 0 0 0 0 0
56376- 0 0 0 0 0 0 0 0 0 0 0 0
56377- 0 0 0 0 0 0 0 0 0 0 0 0
56378- 0 0 0 0 0 0 0 0 0 0 0 0
56379- 0 0 0 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 0 0 0
56381- 0 0 0 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 6 6 6 10 10 10 14 14 14
56387- 22 22 22 26 26 26 30 30 30 34 34 34
56388- 30 30 30 30 30 30 26 26 26 18 18 18
56389- 14 14 14 10 10 10 6 6 6 0 0 0
56390- 0 0 0 0 0 0 0 0 0 0 0 0
56391- 0 0 0 0 0 0 0 0 0 0 0 0
56392- 0 0 0 0 0 0 0 0 0 0 0 0
56393- 0 0 0 0 0 0 0 0 0 0 0 0
56394- 0 0 0 0 0 0 0 0 0 0 0 0
56395- 0 0 0 0 0 0 0 0 0 0 0 0
56396- 0 0 0 0 0 0 0 0 0 0 0 0
56397- 0 0 0 0 0 0 0 0 0 0 0 0
56398- 0 0 0 0 0 0 0 0 0 0 0 0
56399- 0 0 0 0 0 1 0 0 1 0 0 0
56400- 0 0 0 0 0 0 0 0 0 0 0 0
56401- 0 0 0 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 6 6 6 14 14 14 26 26 26 42 42 42
56407- 54 54 54 66 66 66 78 78 78 78 78 78
56408- 78 78 78 74 74 74 66 66 66 54 54 54
56409- 42 42 42 26 26 26 18 18 18 10 10 10
56410- 6 6 6 0 0 0 0 0 0 0 0 0
56411- 0 0 0 0 0 0 0 0 0 0 0 0
56412- 0 0 0 0 0 0 0 0 0 0 0 0
56413- 0 0 0 0 0 0 0 0 0 0 0 0
56414- 0 0 0 0 0 0 0 0 0 0 0 0
56415- 0 0 0 0 0 0 0 0 0 0 0 0
56416- 0 0 0 0 0 0 0 0 0 0 0 0
56417- 0 0 0 0 0 0 0 0 0 0 0 0
56418- 0 0 0 0 0 0 0 0 0 0 0 0
56419- 0 0 1 0 0 0 0 0 0 0 0 0
56420- 0 0 0 0 0 0 0 0 0 0 0 0
56421- 0 0 0 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 10 10 10
56426- 22 22 22 42 42 42 66 66 66 86 86 86
56427- 66 66 66 38 38 38 38 38 38 22 22 22
56428- 26 26 26 34 34 34 54 54 54 66 66 66
56429- 86 86 86 70 70 70 46 46 46 26 26 26
56430- 14 14 14 6 6 6 0 0 0 0 0 0
56431- 0 0 0 0 0 0 0 0 0 0 0 0
56432- 0 0 0 0 0 0 0 0 0 0 0 0
56433- 0 0 0 0 0 0 0 0 0 0 0 0
56434- 0 0 0 0 0 0 0 0 0 0 0 0
56435- 0 0 0 0 0 0 0 0 0 0 0 0
56436- 0 0 0 0 0 0 0 0 0 0 0 0
56437- 0 0 0 0 0 0 0 0 0 0 0 0
56438- 0 0 0 0 0 0 0 0 0 0 0 0
56439- 0 0 1 0 0 1 0 0 1 0 0 0
56440- 0 0 0 0 0 0 0 0 0 0 0 0
56441- 0 0 0 0 0 0 0 0 0 0 0 0
56442- 0 0 0 0 0 0 0 0 0 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 10 10 10 26 26 26
56446- 50 50 50 82 82 82 58 58 58 6 6 6
56447- 2 2 6 2 2 6 2 2 6 2 2 6
56448- 2 2 6 2 2 6 2 2 6 2 2 6
56449- 6 6 6 54 54 54 86 86 86 66 66 66
56450- 38 38 38 18 18 18 6 6 6 0 0 0
56451- 0 0 0 0 0 0 0 0 0 0 0 0
56452- 0 0 0 0 0 0 0 0 0 0 0 0
56453- 0 0 0 0 0 0 0 0 0 0 0 0
56454- 0 0 0 0 0 0 0 0 0 0 0 0
56455- 0 0 0 0 0 0 0 0 0 0 0 0
56456- 0 0 0 0 0 0 0 0 0 0 0 0
56457- 0 0 0 0 0 0 0 0 0 0 0 0
56458- 0 0 0 0 0 0 0 0 0 0 0 0
56459- 0 0 0 0 0 0 0 0 0 0 0 0
56460- 0 0 0 0 0 0 0 0 0 0 0 0
56461- 0 0 0 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 0 0 0
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 6 6 6 22 22 22 50 50 50
56466- 78 78 78 34 34 34 2 2 6 2 2 6
56467- 2 2 6 2 2 6 2 2 6 2 2 6
56468- 2 2 6 2 2 6 2 2 6 2 2 6
56469- 2 2 6 2 2 6 6 6 6 70 70 70
56470- 78 78 78 46 46 46 22 22 22 6 6 6
56471- 0 0 0 0 0 0 0 0 0 0 0 0
56472- 0 0 0 0 0 0 0 0 0 0 0 0
56473- 0 0 0 0 0 0 0 0 0 0 0 0
56474- 0 0 0 0 0 0 0 0 0 0 0 0
56475- 0 0 0 0 0 0 0 0 0 0 0 0
56476- 0 0 0 0 0 0 0 0 0 0 0 0
56477- 0 0 0 0 0 0 0 0 0 0 0 0
56478- 0 0 0 0 0 0 0 0 0 0 0 0
56479- 0 0 1 0 0 1 0 0 1 0 0 0
56480- 0 0 0 0 0 0 0 0 0 0 0 0
56481- 0 0 0 0 0 0 0 0 0 0 0 0
56482- 0 0 0 0 0 0 0 0 0 0 0 0
56483- 0 0 0 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 6 6 6 18 18 18 42 42 42 82 82 82
56486- 26 26 26 2 2 6 2 2 6 2 2 6
56487- 2 2 6 2 2 6 2 2 6 2 2 6
56488- 2 2 6 2 2 6 2 2 6 14 14 14
56489- 46 46 46 34 34 34 6 6 6 2 2 6
56490- 42 42 42 78 78 78 42 42 42 18 18 18
56491- 6 6 6 0 0 0 0 0 0 0 0 0
56492- 0 0 0 0 0 0 0 0 0 0 0 0
56493- 0 0 0 0 0 0 0 0 0 0 0 0
56494- 0 0 0 0 0 0 0 0 0 0 0 0
56495- 0 0 0 0 0 0 0 0 0 0 0 0
56496- 0 0 0 0 0 0 0 0 0 0 0 0
56497- 0 0 0 0 0 0 0 0 0 0 0 0
56498- 0 0 0 0 0 0 0 0 0 0 0 0
56499- 0 0 1 0 0 0 0 0 1 0 0 0
56500- 0 0 0 0 0 0 0 0 0 0 0 0
56501- 0 0 0 0 0 0 0 0 0 0 0 0
56502- 0 0 0 0 0 0 0 0 0 0 0 0
56503- 0 0 0 0 0 0 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 10 10 10 30 30 30 66 66 66 58 58 58
56506- 2 2 6 2 2 6 2 2 6 2 2 6
56507- 2 2 6 2 2 6 2 2 6 2 2 6
56508- 2 2 6 2 2 6 2 2 6 26 26 26
56509- 86 86 86 101 101 101 46 46 46 10 10 10
56510- 2 2 6 58 58 58 70 70 70 34 34 34
56511- 10 10 10 0 0 0 0 0 0 0 0 0
56512- 0 0 0 0 0 0 0 0 0 0 0 0
56513- 0 0 0 0 0 0 0 0 0 0 0 0
56514- 0 0 0 0 0 0 0 0 0 0 0 0
56515- 0 0 0 0 0 0 0 0 0 0 0 0
56516- 0 0 0 0 0 0 0 0 0 0 0 0
56517- 0 0 0 0 0 0 0 0 0 0 0 0
56518- 0 0 0 0 0 0 0 0 0 0 0 0
56519- 0 0 1 0 0 1 0 0 1 0 0 0
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 0 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 0 0 0
56523- 0 0 0 0 0 0 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 14 14 14 42 42 42 86 86 86 10 10 10
56526- 2 2 6 2 2 6 2 2 6 2 2 6
56527- 2 2 6 2 2 6 2 2 6 2 2 6
56528- 2 2 6 2 2 6 2 2 6 30 30 30
56529- 94 94 94 94 94 94 58 58 58 26 26 26
56530- 2 2 6 6 6 6 78 78 78 54 54 54
56531- 22 22 22 6 6 6 0 0 0 0 0 0
56532- 0 0 0 0 0 0 0 0 0 0 0 0
56533- 0 0 0 0 0 0 0 0 0 0 0 0
56534- 0 0 0 0 0 0 0 0 0 0 0 0
56535- 0 0 0 0 0 0 0 0 0 0 0 0
56536- 0 0 0 0 0 0 0 0 0 0 0 0
56537- 0 0 0 0 0 0 0 0 0 0 0 0
56538- 0 0 0 0 0 0 0 0 0 0 0 0
56539- 0 0 0 0 0 0 0 0 0 0 0 0
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 0 0 0 0 0 0
56543- 0 0 0 0 0 0 0 0 0 0 0 0
56544- 0 0 0 0 0 0 0 0 0 6 6 6
56545- 22 22 22 62 62 62 62 62 62 2 2 6
56546- 2 2 6 2 2 6 2 2 6 2 2 6
56547- 2 2 6 2 2 6 2 2 6 2 2 6
56548- 2 2 6 2 2 6 2 2 6 26 26 26
56549- 54 54 54 38 38 38 18 18 18 10 10 10
56550- 2 2 6 2 2 6 34 34 34 82 82 82
56551- 38 38 38 14 14 14 0 0 0 0 0 0
56552- 0 0 0 0 0 0 0 0 0 0 0 0
56553- 0 0 0 0 0 0 0 0 0 0 0 0
56554- 0 0 0 0 0 0 0 0 0 0 0 0
56555- 0 0 0 0 0 0 0 0 0 0 0 0
56556- 0 0 0 0 0 0 0 0 0 0 0 0
56557- 0 0 0 0 0 0 0 0 0 0 0 0
56558- 0 0 0 0 0 0 0 0 0 0 0 0
56559- 0 0 0 0 0 1 0 0 1 0 0 0
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 0 0 0 0 0 0 0
56562- 0 0 0 0 0 0 0 0 0 0 0 0
56563- 0 0 0 0 0 0 0 0 0 0 0 0
56564- 0 0 0 0 0 0 0 0 0 6 6 6
56565- 30 30 30 78 78 78 30 30 30 2 2 6
56566- 2 2 6 2 2 6 2 2 6 2 2 6
56567- 2 2 6 2 2 6 2 2 6 2 2 6
56568- 2 2 6 2 2 6 2 2 6 10 10 10
56569- 10 10 10 2 2 6 2 2 6 2 2 6
56570- 2 2 6 2 2 6 2 2 6 78 78 78
56571- 50 50 50 18 18 18 6 6 6 0 0 0
56572- 0 0 0 0 0 0 0 0 0 0 0 0
56573- 0 0 0 0 0 0 0 0 0 0 0 0
56574- 0 0 0 0 0 0 0 0 0 0 0 0
56575- 0 0 0 0 0 0 0 0 0 0 0 0
56576- 0 0 0 0 0 0 0 0 0 0 0 0
56577- 0 0 0 0 0 0 0 0 0 0 0 0
56578- 0 0 0 0 0 0 0 0 0 0 0 0
56579- 0 0 1 0 0 0 0 0 0 0 0 0
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 0 0 0 0 0 0 0 0 0 0
56582- 0 0 0 0 0 0 0 0 0 0 0 0
56583- 0 0 0 0 0 0 0 0 0 0 0 0
56584- 0 0 0 0 0 0 0 0 0 10 10 10
56585- 38 38 38 86 86 86 14 14 14 2 2 6
56586- 2 2 6 2 2 6 2 2 6 2 2 6
56587- 2 2 6 2 2 6 2 2 6 2 2 6
56588- 2 2 6 2 2 6 2 2 6 2 2 6
56589- 2 2 6 2 2 6 2 2 6 2 2 6
56590- 2 2 6 2 2 6 2 2 6 54 54 54
56591- 66 66 66 26 26 26 6 6 6 0 0 0
56592- 0 0 0 0 0 0 0 0 0 0 0 0
56593- 0 0 0 0 0 0 0 0 0 0 0 0
56594- 0 0 0 0 0 0 0 0 0 0 0 0
56595- 0 0 0 0 0 0 0 0 0 0 0 0
56596- 0 0 0 0 0 0 0 0 0 0 0 0
56597- 0 0 0 0 0 0 0 0 0 0 0 0
56598- 0 0 0 0 0 0 0 0 0 0 0 0
56599- 0 0 0 0 0 1 0 0 1 0 0 0
56600- 0 0 0 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 0 0 0 0 0 0 0
56602- 0 0 0 0 0 0 0 0 0 0 0 0
56603- 0 0 0 0 0 0 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 14 14 14
56605- 42 42 42 82 82 82 2 2 6 2 2 6
56606- 2 2 6 6 6 6 10 10 10 2 2 6
56607- 2 2 6 2 2 6 2 2 6 2 2 6
56608- 2 2 6 2 2 6 2 2 6 6 6 6
56609- 14 14 14 10 10 10 2 2 6 2 2 6
56610- 2 2 6 2 2 6 2 2 6 18 18 18
56611- 82 82 82 34 34 34 10 10 10 0 0 0
56612- 0 0 0 0 0 0 0 0 0 0 0 0
56613- 0 0 0 0 0 0 0 0 0 0 0 0
56614- 0 0 0 0 0 0 0 0 0 0 0 0
56615- 0 0 0 0 0 0 0 0 0 0 0 0
56616- 0 0 0 0 0 0 0 0 0 0 0 0
56617- 0 0 0 0 0 0 0 0 0 0 0 0
56618- 0 0 0 0 0 0 0 0 0 0 0 0
56619- 0 0 1 0 0 0 0 0 0 0 0 0
56620- 0 0 0 0 0 0 0 0 0 0 0 0
56621- 0 0 0 0 0 0 0 0 0 0 0 0
56622- 0 0 0 0 0 0 0 0 0 0 0 0
56623- 0 0 0 0 0 0 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 14 14 14
56625- 46 46 46 86 86 86 2 2 6 2 2 6
56626- 6 6 6 6 6 6 22 22 22 34 34 34
56627- 6 6 6 2 2 6 2 2 6 2 2 6
56628- 2 2 6 2 2 6 18 18 18 34 34 34
56629- 10 10 10 50 50 50 22 22 22 2 2 6
56630- 2 2 6 2 2 6 2 2 6 10 10 10
56631- 86 86 86 42 42 42 14 14 14 0 0 0
56632- 0 0 0 0 0 0 0 0 0 0 0 0
56633- 0 0 0 0 0 0 0 0 0 0 0 0
56634- 0 0 0 0 0 0 0 0 0 0 0 0
56635- 0 0 0 0 0 0 0 0 0 0 0 0
56636- 0 0 0 0 0 0 0 0 0 0 0 0
56637- 0 0 0 0 0 0 0 0 0 0 0 0
56638- 0 0 0 0 0 0 0 0 0 0 0 0
56639- 0 0 1 0 0 1 0 0 1 0 0 0
56640- 0 0 0 0 0 0 0 0 0 0 0 0
56641- 0 0 0 0 0 0 0 0 0 0 0 0
56642- 0 0 0 0 0 0 0 0 0 0 0 0
56643- 0 0 0 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 14 14 14
56645- 46 46 46 86 86 86 2 2 6 2 2 6
56646- 38 38 38 116 116 116 94 94 94 22 22 22
56647- 22 22 22 2 2 6 2 2 6 2 2 6
56648- 14 14 14 86 86 86 138 138 138 162 162 162
56649-154 154 154 38 38 38 26 26 26 6 6 6
56650- 2 2 6 2 2 6 2 2 6 2 2 6
56651- 86 86 86 46 46 46 14 14 14 0 0 0
56652- 0 0 0 0 0 0 0 0 0 0 0 0
56653- 0 0 0 0 0 0 0 0 0 0 0 0
56654- 0 0 0 0 0 0 0 0 0 0 0 0
56655- 0 0 0 0 0 0 0 0 0 0 0 0
56656- 0 0 0 0 0 0 0 0 0 0 0 0
56657- 0 0 0 0 0 0 0 0 0 0 0 0
56658- 0 0 0 0 0 0 0 0 0 0 0 0
56659- 0 0 0 0 0 0 0 0 0 0 0 0
56660- 0 0 0 0 0 0 0 0 0 0 0 0
56661- 0 0 0 0 0 0 0 0 0 0 0 0
56662- 0 0 0 0 0 0 0 0 0 0 0 0
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 14 14 14
56665- 46 46 46 86 86 86 2 2 6 14 14 14
56666-134 134 134 198 198 198 195 195 195 116 116 116
56667- 10 10 10 2 2 6 2 2 6 6 6 6
56668-101 98 89 187 187 187 210 210 210 218 218 218
56669-214 214 214 134 134 134 14 14 14 6 6 6
56670- 2 2 6 2 2 6 2 2 6 2 2 6
56671- 86 86 86 50 50 50 18 18 18 6 6 6
56672- 0 0 0 0 0 0 0 0 0 0 0 0
56673- 0 0 0 0 0 0 0 0 0 0 0 0
56674- 0 0 0 0 0 0 0 0 0 0 0 0
56675- 0 0 0 0 0 0 0 0 0 0 0 0
56676- 0 0 0 0 0 0 0 0 0 0 0 0
56677- 0 0 0 0 0 0 0 0 0 0 0 0
56678- 0 0 0 0 0 0 0 0 1 0 0 0
56679- 0 0 1 0 0 1 0 0 1 0 0 0
56680- 0 0 0 0 0 0 0 0 0 0 0 0
56681- 0 0 0 0 0 0 0 0 0 0 0 0
56682- 0 0 0 0 0 0 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 14 14 14
56685- 46 46 46 86 86 86 2 2 6 54 54 54
56686-218 218 218 195 195 195 226 226 226 246 246 246
56687- 58 58 58 2 2 6 2 2 6 30 30 30
56688-210 210 210 253 253 253 174 174 174 123 123 123
56689-221 221 221 234 234 234 74 74 74 2 2 6
56690- 2 2 6 2 2 6 2 2 6 2 2 6
56691- 70 70 70 58 58 58 22 22 22 6 6 6
56692- 0 0 0 0 0 0 0 0 0 0 0 0
56693- 0 0 0 0 0 0 0 0 0 0 0 0
56694- 0 0 0 0 0 0 0 0 0 0 0 0
56695- 0 0 0 0 0 0 0 0 0 0 0 0
56696- 0 0 0 0 0 0 0 0 0 0 0 0
56697- 0 0 0 0 0 0 0 0 0 0 0 0
56698- 0 0 0 0 0 0 0 0 0 0 0 0
56699- 0 0 0 0 0 0 0 0 0 0 0 0
56700- 0 0 0 0 0 0 0 0 0 0 0 0
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 14 14 14
56705- 46 46 46 82 82 82 2 2 6 106 106 106
56706-170 170 170 26 26 26 86 86 86 226 226 226
56707-123 123 123 10 10 10 14 14 14 46 46 46
56708-231 231 231 190 190 190 6 6 6 70 70 70
56709- 90 90 90 238 238 238 158 158 158 2 2 6
56710- 2 2 6 2 2 6 2 2 6 2 2 6
56711- 70 70 70 58 58 58 22 22 22 6 6 6
56712- 0 0 0 0 0 0 0 0 0 0 0 0
56713- 0 0 0 0 0 0 0 0 0 0 0 0
56714- 0 0 0 0 0 0 0 0 0 0 0 0
56715- 0 0 0 0 0 0 0 0 0 0 0 0
56716- 0 0 0 0 0 0 0 0 0 0 0 0
56717- 0 0 0 0 0 0 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 1 0 0 0
56719- 0 0 1 0 0 1 0 0 1 0 0 0
56720- 0 0 0 0 0 0 0 0 0 0 0 0
56721- 0 0 0 0 0 0 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 14 14 14
56725- 42 42 42 86 86 86 6 6 6 116 116 116
56726-106 106 106 6 6 6 70 70 70 149 149 149
56727-128 128 128 18 18 18 38 38 38 54 54 54
56728-221 221 221 106 106 106 2 2 6 14 14 14
56729- 46 46 46 190 190 190 198 198 198 2 2 6
56730- 2 2 6 2 2 6 2 2 6 2 2 6
56731- 74 74 74 62 62 62 22 22 22 6 6 6
56732- 0 0 0 0 0 0 0 0 0 0 0 0
56733- 0 0 0 0 0 0 0 0 0 0 0 0
56734- 0 0 0 0 0 0 0 0 0 0 0 0
56735- 0 0 0 0 0 0 0 0 0 0 0 0
56736- 0 0 0 0 0 0 0 0 0 0 0 0
56737- 0 0 0 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 1 0 0 0
56739- 0 0 1 0 0 0 0 0 1 0 0 0
56740- 0 0 0 0 0 0 0 0 0 0 0 0
56741- 0 0 0 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 14 14 14
56745- 42 42 42 94 94 94 14 14 14 101 101 101
56746-128 128 128 2 2 6 18 18 18 116 116 116
56747-118 98 46 121 92 8 121 92 8 98 78 10
56748-162 162 162 106 106 106 2 2 6 2 2 6
56749- 2 2 6 195 195 195 195 195 195 6 6 6
56750- 2 2 6 2 2 6 2 2 6 2 2 6
56751- 74 74 74 62 62 62 22 22 22 6 6 6
56752- 0 0 0 0 0 0 0 0 0 0 0 0
56753- 0 0 0 0 0 0 0 0 0 0 0 0
56754- 0 0 0 0 0 0 0 0 0 0 0 0
56755- 0 0 0 0 0 0 0 0 0 0 0 0
56756- 0 0 0 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 1 0 0 1
56759- 0 0 1 0 0 0 0 0 1 0 0 0
56760- 0 0 0 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 10 10 10
56765- 38 38 38 90 90 90 14 14 14 58 58 58
56766-210 210 210 26 26 26 54 38 6 154 114 10
56767-226 170 11 236 186 11 225 175 15 184 144 12
56768-215 174 15 175 146 61 37 26 9 2 2 6
56769- 70 70 70 246 246 246 138 138 138 2 2 6
56770- 2 2 6 2 2 6 2 2 6 2 2 6
56771- 70 70 70 66 66 66 26 26 26 6 6 6
56772- 0 0 0 0 0 0 0 0 0 0 0 0
56773- 0 0 0 0 0 0 0 0 0 0 0 0
56774- 0 0 0 0 0 0 0 0 0 0 0 0
56775- 0 0 0 0 0 0 0 0 0 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 10 10 10
56785- 38 38 38 86 86 86 14 14 14 10 10 10
56786-195 195 195 188 164 115 192 133 9 225 175 15
56787-239 182 13 234 190 10 232 195 16 232 200 30
56788-245 207 45 241 208 19 232 195 16 184 144 12
56789-218 194 134 211 206 186 42 42 42 2 2 6
56790- 2 2 6 2 2 6 2 2 6 2 2 6
56791- 50 50 50 74 74 74 30 30 30 6 6 6
56792- 0 0 0 0 0 0 0 0 0 0 0 0
56793- 0 0 0 0 0 0 0 0 0 0 0 0
56794- 0 0 0 0 0 0 0 0 0 0 0 0
56795- 0 0 0 0 0 0 0 0 0 0 0 0
56796- 0 0 0 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 0 0 0 0 0 0
56799- 0 0 0 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 10 10 10
56805- 34 34 34 86 86 86 14 14 14 2 2 6
56806-121 87 25 192 133 9 219 162 10 239 182 13
56807-236 186 11 232 195 16 241 208 19 244 214 54
56808-246 218 60 246 218 38 246 215 20 241 208 19
56809-241 208 19 226 184 13 121 87 25 2 2 6
56810- 2 2 6 2 2 6 2 2 6 2 2 6
56811- 50 50 50 82 82 82 34 34 34 10 10 10
56812- 0 0 0 0 0 0 0 0 0 0 0 0
56813- 0 0 0 0 0 0 0 0 0 0 0 0
56814- 0 0 0 0 0 0 0 0 0 0 0 0
56815- 0 0 0 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 0 0 0 0 0 0
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 10 10 10
56825- 34 34 34 82 82 82 30 30 30 61 42 6
56826-180 123 7 206 145 10 230 174 11 239 182 13
56827-234 190 10 238 202 15 241 208 19 246 218 74
56828-246 218 38 246 215 20 246 215 20 246 215 20
56829-226 184 13 215 174 15 184 144 12 6 6 6
56830- 2 2 6 2 2 6 2 2 6 2 2 6
56831- 26 26 26 94 94 94 42 42 42 14 14 14
56832- 0 0 0 0 0 0 0 0 0 0 0 0
56833- 0 0 0 0 0 0 0 0 0 0 0 0
56834- 0 0 0 0 0 0 0 0 0 0 0 0
56835- 0 0 0 0 0 0 0 0 0 0 0 0
56836- 0 0 0 0 0 0 0 0 0 0 0 0
56837- 0 0 0 0 0 0 0 0 0 0 0 0
56838- 0 0 0 0 0 0 0 0 0 0 0 0
56839- 0 0 0 0 0 0 0 0 0 0 0 0
56840- 0 0 0 0 0 0 0 0 0 0 0 0
56841- 0 0 0 0 0 0 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 10 10 10
56845- 30 30 30 78 78 78 50 50 50 104 69 6
56846-192 133 9 216 158 10 236 178 12 236 186 11
56847-232 195 16 241 208 19 244 214 54 245 215 43
56848-246 215 20 246 215 20 241 208 19 198 155 10
56849-200 144 11 216 158 10 156 118 10 2 2 6
56850- 2 2 6 2 2 6 2 2 6 2 2 6
56851- 6 6 6 90 90 90 54 54 54 18 18 18
56852- 6 6 6 0 0 0 0 0 0 0 0 0
56853- 0 0 0 0 0 0 0 0 0 0 0 0
56854- 0 0 0 0 0 0 0 0 0 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 0 0 0 0 0 0 0 0 0
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 0 0 0 0 0 0 0 0 0 0 0 0
56859- 0 0 0 0 0 0 0 0 0 0 0 0
56860- 0 0 0 0 0 0 0 0 0 0 0 0
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 10 10 10
56865- 30 30 30 78 78 78 46 46 46 22 22 22
56866-137 92 6 210 162 10 239 182 13 238 190 10
56867-238 202 15 241 208 19 246 215 20 246 215 20
56868-241 208 19 203 166 17 185 133 11 210 150 10
56869-216 158 10 210 150 10 102 78 10 2 2 6
56870- 6 6 6 54 54 54 14 14 14 2 2 6
56871- 2 2 6 62 62 62 74 74 74 30 30 30
56872- 10 10 10 0 0 0 0 0 0 0 0 0
56873- 0 0 0 0 0 0 0 0 0 0 0 0
56874- 0 0 0 0 0 0 0 0 0 0 0 0
56875- 0 0 0 0 0 0 0 0 0 0 0 0
56876- 0 0 0 0 0 0 0 0 0 0 0 0
56877- 0 0 0 0 0 0 0 0 0 0 0 0
56878- 0 0 0 0 0 0 0 0 0 0 0 0
56879- 0 0 0 0 0 0 0 0 0 0 0 0
56880- 0 0 0 0 0 0 0 0 0 0 0 0
56881- 0 0 0 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 10 10 10
56885- 34 34 34 78 78 78 50 50 50 6 6 6
56886- 94 70 30 139 102 15 190 146 13 226 184 13
56887-232 200 30 232 195 16 215 174 15 190 146 13
56888-168 122 10 192 133 9 210 150 10 213 154 11
56889-202 150 34 182 157 106 101 98 89 2 2 6
56890- 2 2 6 78 78 78 116 116 116 58 58 58
56891- 2 2 6 22 22 22 90 90 90 46 46 46
56892- 18 18 18 6 6 6 0 0 0 0 0 0
56893- 0 0 0 0 0 0 0 0 0 0 0 0
56894- 0 0 0 0 0 0 0 0 0 0 0 0
56895- 0 0 0 0 0 0 0 0 0 0 0 0
56896- 0 0 0 0 0 0 0 0 0 0 0 0
56897- 0 0 0 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 0 0 0
56899- 0 0 0 0 0 0 0 0 0 0 0 0
56900- 0 0 0 0 0 0 0 0 0 0 0 0
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 10 10 10
56905- 38 38 38 86 86 86 50 50 50 6 6 6
56906-128 128 128 174 154 114 156 107 11 168 122 10
56907-198 155 10 184 144 12 197 138 11 200 144 11
56908-206 145 10 206 145 10 197 138 11 188 164 115
56909-195 195 195 198 198 198 174 174 174 14 14 14
56910- 2 2 6 22 22 22 116 116 116 116 116 116
56911- 22 22 22 2 2 6 74 74 74 70 70 70
56912- 30 30 30 10 10 10 0 0 0 0 0 0
56913- 0 0 0 0 0 0 0 0 0 0 0 0
56914- 0 0 0 0 0 0 0 0 0 0 0 0
56915- 0 0 0 0 0 0 0 0 0 0 0 0
56916- 0 0 0 0 0 0 0 0 0 0 0 0
56917- 0 0 0 0 0 0 0 0 0 0 0 0
56918- 0 0 0 0 0 0 0 0 0 0 0 0
56919- 0 0 0 0 0 0 0 0 0 0 0 0
56920- 0 0 0 0 0 0 0 0 0 0 0 0
56921- 0 0 0 0 0 0 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 6 6 6 18 18 18
56925- 50 50 50 101 101 101 26 26 26 10 10 10
56926-138 138 138 190 190 190 174 154 114 156 107 11
56927-197 138 11 200 144 11 197 138 11 192 133 9
56928-180 123 7 190 142 34 190 178 144 187 187 187
56929-202 202 202 221 221 221 214 214 214 66 66 66
56930- 2 2 6 2 2 6 50 50 50 62 62 62
56931- 6 6 6 2 2 6 10 10 10 90 90 90
56932- 50 50 50 18 18 18 6 6 6 0 0 0
56933- 0 0 0 0 0 0 0 0 0 0 0 0
56934- 0 0 0 0 0 0 0 0 0 0 0 0
56935- 0 0 0 0 0 0 0 0 0 0 0 0
56936- 0 0 0 0 0 0 0 0 0 0 0 0
56937- 0 0 0 0 0 0 0 0 0 0 0 0
56938- 0 0 0 0 0 0 0 0 0 0 0 0
56939- 0 0 0 0 0 0 0 0 0 0 0 0
56940- 0 0 0 0 0 0 0 0 0 0 0 0
56941- 0 0 0 0 0 0 0 0 0 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 10 10 10 34 34 34
56945- 74 74 74 74 74 74 2 2 6 6 6 6
56946-144 144 144 198 198 198 190 190 190 178 166 146
56947-154 121 60 156 107 11 156 107 11 168 124 44
56948-174 154 114 187 187 187 190 190 190 210 210 210
56949-246 246 246 253 253 253 253 253 253 182 182 182
56950- 6 6 6 2 2 6 2 2 6 2 2 6
56951- 2 2 6 2 2 6 2 2 6 62 62 62
56952- 74 74 74 34 34 34 14 14 14 0 0 0
56953- 0 0 0 0 0 0 0 0 0 0 0 0
56954- 0 0 0 0 0 0 0 0 0 0 0 0
56955- 0 0 0 0 0 0 0 0 0 0 0 0
56956- 0 0 0 0 0 0 0 0 0 0 0 0
56957- 0 0 0 0 0 0 0 0 0 0 0 0
56958- 0 0 0 0 0 0 0 0 0 0 0 0
56959- 0 0 0 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 0 0 0
56961- 0 0 0 0 0 0 0 0 0 0 0 0
56962- 0 0 0 0 0 0 0 0 0 0 0 0
56963- 0 0 0 0 0 0 0 0 0 0 0 0
56964- 0 0 0 10 10 10 22 22 22 54 54 54
56965- 94 94 94 18 18 18 2 2 6 46 46 46
56966-234 234 234 221 221 221 190 190 190 190 190 190
56967-190 190 190 187 187 187 187 187 187 190 190 190
56968-190 190 190 195 195 195 214 214 214 242 242 242
56969-253 253 253 253 253 253 253 253 253 253 253 253
56970- 82 82 82 2 2 6 2 2 6 2 2 6
56971- 2 2 6 2 2 6 2 2 6 14 14 14
56972- 86 86 86 54 54 54 22 22 22 6 6 6
56973- 0 0 0 0 0 0 0 0 0 0 0 0
56974- 0 0 0 0 0 0 0 0 0 0 0 0
56975- 0 0 0 0 0 0 0 0 0 0 0 0
56976- 0 0 0 0 0 0 0 0 0 0 0 0
56977- 0 0 0 0 0 0 0 0 0 0 0 0
56978- 0 0 0 0 0 0 0 0 0 0 0 0
56979- 0 0 0 0 0 0 0 0 0 0 0 0
56980- 0 0 0 0 0 0 0 0 0 0 0 0
56981- 0 0 0 0 0 0 0 0 0 0 0 0
56982- 0 0 0 0 0 0 0 0 0 0 0 0
56983- 0 0 0 0 0 0 0 0 0 0 0 0
56984- 6 6 6 18 18 18 46 46 46 90 90 90
56985- 46 46 46 18 18 18 6 6 6 182 182 182
56986-253 253 253 246 246 246 206 206 206 190 190 190
56987-190 190 190 190 190 190 190 190 190 190 190 190
56988-206 206 206 231 231 231 250 250 250 253 253 253
56989-253 253 253 253 253 253 253 253 253 253 253 253
56990-202 202 202 14 14 14 2 2 6 2 2 6
56991- 2 2 6 2 2 6 2 2 6 2 2 6
56992- 42 42 42 86 86 86 42 42 42 18 18 18
56993- 6 6 6 0 0 0 0 0 0 0 0 0
56994- 0 0 0 0 0 0 0 0 0 0 0 0
56995- 0 0 0 0 0 0 0 0 0 0 0 0
56996- 0 0 0 0 0 0 0 0 0 0 0 0
56997- 0 0 0 0 0 0 0 0 0 0 0 0
56998- 0 0 0 0 0 0 0 0 0 0 0 0
56999- 0 0 0 0 0 0 0 0 0 0 0 0
57000- 0 0 0 0 0 0 0 0 0 0 0 0
57001- 0 0 0 0 0 0 0 0 0 0 0 0
57002- 0 0 0 0 0 0 0 0 0 0 0 0
57003- 0 0 0 0 0 0 0 0 0 6 6 6
57004- 14 14 14 38 38 38 74 74 74 66 66 66
57005- 2 2 6 6 6 6 90 90 90 250 250 250
57006-253 253 253 253 253 253 238 238 238 198 198 198
57007-190 190 190 190 190 190 195 195 195 221 221 221
57008-246 246 246 253 253 253 253 253 253 253 253 253
57009-253 253 253 253 253 253 253 253 253 253 253 253
57010-253 253 253 82 82 82 2 2 6 2 2 6
57011- 2 2 6 2 2 6 2 2 6 2 2 6
57012- 2 2 6 78 78 78 70 70 70 34 34 34
57013- 14 14 14 6 6 6 0 0 0 0 0 0
57014- 0 0 0 0 0 0 0 0 0 0 0 0
57015- 0 0 0 0 0 0 0 0 0 0 0 0
57016- 0 0 0 0 0 0 0 0 0 0 0 0
57017- 0 0 0 0 0 0 0 0 0 0 0 0
57018- 0 0 0 0 0 0 0 0 0 0 0 0
57019- 0 0 0 0 0 0 0 0 0 0 0 0
57020- 0 0 0 0 0 0 0 0 0 0 0 0
57021- 0 0 0 0 0 0 0 0 0 0 0 0
57022- 0 0 0 0 0 0 0 0 0 0 0 0
57023- 0 0 0 0 0 0 0 0 0 14 14 14
57024- 34 34 34 66 66 66 78 78 78 6 6 6
57025- 2 2 6 18 18 18 218 218 218 253 253 253
57026-253 253 253 253 253 253 253 253 253 246 246 246
57027-226 226 226 231 231 231 246 246 246 253 253 253
57028-253 253 253 253 253 253 253 253 253 253 253 253
57029-253 253 253 253 253 253 253 253 253 253 253 253
57030-253 253 253 178 178 178 2 2 6 2 2 6
57031- 2 2 6 2 2 6 2 2 6 2 2 6
57032- 2 2 6 18 18 18 90 90 90 62 62 62
57033- 30 30 30 10 10 10 0 0 0 0 0 0
57034- 0 0 0 0 0 0 0 0 0 0 0 0
57035- 0 0 0 0 0 0 0 0 0 0 0 0
57036- 0 0 0 0 0 0 0 0 0 0 0 0
57037- 0 0 0 0 0 0 0 0 0 0 0 0
57038- 0 0 0 0 0 0 0 0 0 0 0 0
57039- 0 0 0 0 0 0 0 0 0 0 0 0
57040- 0 0 0 0 0 0 0 0 0 0 0 0
57041- 0 0 0 0 0 0 0 0 0 0 0 0
57042- 0 0 0 0 0 0 0 0 0 0 0 0
57043- 0 0 0 0 0 0 10 10 10 26 26 26
57044- 58 58 58 90 90 90 18 18 18 2 2 6
57045- 2 2 6 110 110 110 253 253 253 253 253 253
57046-253 253 253 253 253 253 253 253 253 253 253 253
57047-250 250 250 253 253 253 253 253 253 253 253 253
57048-253 253 253 253 253 253 253 253 253 253 253 253
57049-253 253 253 253 253 253 253 253 253 253 253 253
57050-253 253 253 231 231 231 18 18 18 2 2 6
57051- 2 2 6 2 2 6 2 2 6 2 2 6
57052- 2 2 6 2 2 6 18 18 18 94 94 94
57053- 54 54 54 26 26 26 10 10 10 0 0 0
57054- 0 0 0 0 0 0 0 0 0 0 0 0
57055- 0 0 0 0 0 0 0 0 0 0 0 0
57056- 0 0 0 0 0 0 0 0 0 0 0 0
57057- 0 0 0 0 0 0 0 0 0 0 0 0
57058- 0 0 0 0 0 0 0 0 0 0 0 0
57059- 0 0 0 0 0 0 0 0 0 0 0 0
57060- 0 0 0 0 0 0 0 0 0 0 0 0
57061- 0 0 0 0 0 0 0 0 0 0 0 0
57062- 0 0 0 0 0 0 0 0 0 0 0 0
57063- 0 0 0 6 6 6 22 22 22 50 50 50
57064- 90 90 90 26 26 26 2 2 6 2 2 6
57065- 14 14 14 195 195 195 250 250 250 253 253 253
57066-253 253 253 253 253 253 253 253 253 253 253 253
57067-253 253 253 253 253 253 253 253 253 253 253 253
57068-253 253 253 253 253 253 253 253 253 253 253 253
57069-253 253 253 253 253 253 253 253 253 253 253 253
57070-250 250 250 242 242 242 54 54 54 2 2 6
57071- 2 2 6 2 2 6 2 2 6 2 2 6
57072- 2 2 6 2 2 6 2 2 6 38 38 38
57073- 86 86 86 50 50 50 22 22 22 6 6 6
57074- 0 0 0 0 0 0 0 0 0 0 0 0
57075- 0 0 0 0 0 0 0 0 0 0 0 0
57076- 0 0 0 0 0 0 0 0 0 0 0 0
57077- 0 0 0 0 0 0 0 0 0 0 0 0
57078- 0 0 0 0 0 0 0 0 0 0 0 0
57079- 0 0 0 0 0 0 0 0 0 0 0 0
57080- 0 0 0 0 0 0 0 0 0 0 0 0
57081- 0 0 0 0 0 0 0 0 0 0 0 0
57082- 0 0 0 0 0 0 0 0 0 0 0 0
57083- 6 6 6 14 14 14 38 38 38 82 82 82
57084- 34 34 34 2 2 6 2 2 6 2 2 6
57085- 42 42 42 195 195 195 246 246 246 253 253 253
57086-253 253 253 253 253 253 253 253 253 250 250 250
57087-242 242 242 242 242 242 250 250 250 253 253 253
57088-253 253 253 253 253 253 253 253 253 253 253 253
57089-253 253 253 250 250 250 246 246 246 238 238 238
57090-226 226 226 231 231 231 101 101 101 6 6 6
57091- 2 2 6 2 2 6 2 2 6 2 2 6
57092- 2 2 6 2 2 6 2 2 6 2 2 6
57093- 38 38 38 82 82 82 42 42 42 14 14 14
57094- 6 6 6 0 0 0 0 0 0 0 0 0
57095- 0 0 0 0 0 0 0 0 0 0 0 0
57096- 0 0 0 0 0 0 0 0 0 0 0 0
57097- 0 0 0 0 0 0 0 0 0 0 0 0
57098- 0 0 0 0 0 0 0 0 0 0 0 0
57099- 0 0 0 0 0 0 0 0 0 0 0 0
57100- 0 0 0 0 0 0 0 0 0 0 0 0
57101- 0 0 0 0 0 0 0 0 0 0 0 0
57102- 0 0 0 0 0 0 0 0 0 0 0 0
57103- 10 10 10 26 26 26 62 62 62 66 66 66
57104- 2 2 6 2 2 6 2 2 6 6 6 6
57105- 70 70 70 170 170 170 206 206 206 234 234 234
57106-246 246 246 250 250 250 250 250 250 238 238 238
57107-226 226 226 231 231 231 238 238 238 250 250 250
57108-250 250 250 250 250 250 246 246 246 231 231 231
57109-214 214 214 206 206 206 202 202 202 202 202 202
57110-198 198 198 202 202 202 182 182 182 18 18 18
57111- 2 2 6 2 2 6 2 2 6 2 2 6
57112- 2 2 6 2 2 6 2 2 6 2 2 6
57113- 2 2 6 62 62 62 66 66 66 30 30 30
57114- 10 10 10 0 0 0 0 0 0 0 0 0
57115- 0 0 0 0 0 0 0 0 0 0 0 0
57116- 0 0 0 0 0 0 0 0 0 0 0 0
57117- 0 0 0 0 0 0 0 0 0 0 0 0
57118- 0 0 0 0 0 0 0 0 0 0 0 0
57119- 0 0 0 0 0 0 0 0 0 0 0 0
57120- 0 0 0 0 0 0 0 0 0 0 0 0
57121- 0 0 0 0 0 0 0 0 0 0 0 0
57122- 0 0 0 0 0 0 0 0 0 0 0 0
57123- 14 14 14 42 42 42 82 82 82 18 18 18
57124- 2 2 6 2 2 6 2 2 6 10 10 10
57125- 94 94 94 182 182 182 218 218 218 242 242 242
57126-250 250 250 253 253 253 253 253 253 250 250 250
57127-234 234 234 253 253 253 253 253 253 253 253 253
57128-253 253 253 253 253 253 253 253 253 246 246 246
57129-238 238 238 226 226 226 210 210 210 202 202 202
57130-195 195 195 195 195 195 210 210 210 158 158 158
57131- 6 6 6 14 14 14 50 50 50 14 14 14
57132- 2 2 6 2 2 6 2 2 6 2 2 6
57133- 2 2 6 6 6 6 86 86 86 46 46 46
57134- 18 18 18 6 6 6 0 0 0 0 0 0
57135- 0 0 0 0 0 0 0 0 0 0 0 0
57136- 0 0 0 0 0 0 0 0 0 0 0 0
57137- 0 0 0 0 0 0 0 0 0 0 0 0
57138- 0 0 0 0 0 0 0 0 0 0 0 0
57139- 0 0 0 0 0 0 0 0 0 0 0 0
57140- 0 0 0 0 0 0 0 0 0 0 0 0
57141- 0 0 0 0 0 0 0 0 0 0 0 0
57142- 0 0 0 0 0 0 0 0 0 6 6 6
57143- 22 22 22 54 54 54 70 70 70 2 2 6
57144- 2 2 6 10 10 10 2 2 6 22 22 22
57145-166 166 166 231 231 231 250 250 250 253 253 253
57146-253 253 253 253 253 253 253 253 253 250 250 250
57147-242 242 242 253 253 253 253 253 253 253 253 253
57148-253 253 253 253 253 253 253 253 253 253 253 253
57149-253 253 253 253 253 253 253 253 253 246 246 246
57150-231 231 231 206 206 206 198 198 198 226 226 226
57151- 94 94 94 2 2 6 6 6 6 38 38 38
57152- 30 30 30 2 2 6 2 2 6 2 2 6
57153- 2 2 6 2 2 6 62 62 62 66 66 66
57154- 26 26 26 10 10 10 0 0 0 0 0 0
57155- 0 0 0 0 0 0 0 0 0 0 0 0
57156- 0 0 0 0 0 0 0 0 0 0 0 0
57157- 0 0 0 0 0 0 0 0 0 0 0 0
57158- 0 0 0 0 0 0 0 0 0 0 0 0
57159- 0 0 0 0 0 0 0 0 0 0 0 0
57160- 0 0 0 0 0 0 0 0 0 0 0 0
57161- 0 0 0 0 0 0 0 0 0 0 0 0
57162- 0 0 0 0 0 0 0 0 0 10 10 10
57163- 30 30 30 74 74 74 50 50 50 2 2 6
57164- 26 26 26 26 26 26 2 2 6 106 106 106
57165-238 238 238 253 253 253 253 253 253 253 253 253
57166-253 253 253 253 253 253 253 253 253 253 253 253
57167-253 253 253 253 253 253 253 253 253 253 253 253
57168-253 253 253 253 253 253 253 253 253 253 253 253
57169-253 253 253 253 253 253 253 253 253 253 253 253
57170-253 253 253 246 246 246 218 218 218 202 202 202
57171-210 210 210 14 14 14 2 2 6 2 2 6
57172- 30 30 30 22 22 22 2 2 6 2 2 6
57173- 2 2 6 2 2 6 18 18 18 86 86 86
57174- 42 42 42 14 14 14 0 0 0 0 0 0
57175- 0 0 0 0 0 0 0 0 0 0 0 0
57176- 0 0 0 0 0 0 0 0 0 0 0 0
57177- 0 0 0 0 0 0 0 0 0 0 0 0
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 0 0 0 0 0 0 0 0 0 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 14 14 14
57183- 42 42 42 90 90 90 22 22 22 2 2 6
57184- 42 42 42 2 2 6 18 18 18 218 218 218
57185-253 253 253 253 253 253 253 253 253 253 253 253
57186-253 253 253 253 253 253 253 253 253 253 253 253
57187-253 253 253 253 253 253 253 253 253 253 253 253
57188-253 253 253 253 253 253 253 253 253 253 253 253
57189-253 253 253 253 253 253 253 253 253 253 253 253
57190-253 253 253 253 253 253 250 250 250 221 221 221
57191-218 218 218 101 101 101 2 2 6 14 14 14
57192- 18 18 18 38 38 38 10 10 10 2 2 6
57193- 2 2 6 2 2 6 2 2 6 78 78 78
57194- 58 58 58 22 22 22 6 6 6 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 0 0 0 0 0 0 0 0 0 0 0 0
57197- 0 0 0 0 0 0 0 0 0 0 0 0
57198- 0 0 0 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 0 0 0 0 0 0
57200- 0 0 0 0 0 0 0 0 0 0 0 0
57201- 0 0 0 0 0 0 0 0 0 0 0 0
57202- 0 0 0 0 0 0 6 6 6 18 18 18
57203- 54 54 54 82 82 82 2 2 6 26 26 26
57204- 22 22 22 2 2 6 123 123 123 253 253 253
57205-253 253 253 253 253 253 253 253 253 253 253 253
57206-253 253 253 253 253 253 253 253 253 253 253 253
57207-253 253 253 253 253 253 253 253 253 253 253 253
57208-253 253 253 253 253 253 253 253 253 253 253 253
57209-253 253 253 253 253 253 253 253 253 253 253 253
57210-253 253 253 253 253 253 253 253 253 250 250 250
57211-238 238 238 198 198 198 6 6 6 38 38 38
57212- 58 58 58 26 26 26 38 38 38 2 2 6
57213- 2 2 6 2 2 6 2 2 6 46 46 46
57214- 78 78 78 30 30 30 10 10 10 0 0 0
57215- 0 0 0 0 0 0 0 0 0 0 0 0
57216- 0 0 0 0 0 0 0 0 0 0 0 0
57217- 0 0 0 0 0 0 0 0 0 0 0 0
57218- 0 0 0 0 0 0 0 0 0 0 0 0
57219- 0 0 0 0 0 0 0 0 0 0 0 0
57220- 0 0 0 0 0 0 0 0 0 0 0 0
57221- 0 0 0 0 0 0 0 0 0 0 0 0
57222- 0 0 0 0 0 0 10 10 10 30 30 30
57223- 74 74 74 58 58 58 2 2 6 42 42 42
57224- 2 2 6 22 22 22 231 231 231 253 253 253
57225-253 253 253 253 253 253 253 253 253 253 253 253
57226-253 253 253 253 253 253 253 253 253 250 250 250
57227-253 253 253 253 253 253 253 253 253 253 253 253
57228-253 253 253 253 253 253 253 253 253 253 253 253
57229-253 253 253 253 253 253 253 253 253 253 253 253
57230-253 253 253 253 253 253 253 253 253 253 253 253
57231-253 253 253 246 246 246 46 46 46 38 38 38
57232- 42 42 42 14 14 14 38 38 38 14 14 14
57233- 2 2 6 2 2 6 2 2 6 6 6 6
57234- 86 86 86 46 46 46 14 14 14 0 0 0
57235- 0 0 0 0 0 0 0 0 0 0 0 0
57236- 0 0 0 0 0 0 0 0 0 0 0 0
57237- 0 0 0 0 0 0 0 0 0 0 0 0
57238- 0 0 0 0 0 0 0 0 0 0 0 0
57239- 0 0 0 0 0 0 0 0 0 0 0 0
57240- 0 0 0 0 0 0 0 0 0 0 0 0
57241- 0 0 0 0 0 0 0 0 0 0 0 0
57242- 0 0 0 6 6 6 14 14 14 42 42 42
57243- 90 90 90 18 18 18 18 18 18 26 26 26
57244- 2 2 6 116 116 116 253 253 253 253 253 253
57245-253 253 253 253 253 253 253 253 253 253 253 253
57246-253 253 253 253 253 253 250 250 250 238 238 238
57247-253 253 253 253 253 253 253 253 253 253 253 253
57248-253 253 253 253 253 253 253 253 253 253 253 253
57249-253 253 253 253 253 253 253 253 253 253 253 253
57250-253 253 253 253 253 253 253 253 253 253 253 253
57251-253 253 253 253 253 253 94 94 94 6 6 6
57252- 2 2 6 2 2 6 10 10 10 34 34 34
57253- 2 2 6 2 2 6 2 2 6 2 2 6
57254- 74 74 74 58 58 58 22 22 22 6 6 6
57255- 0 0 0 0 0 0 0 0 0 0 0 0
57256- 0 0 0 0 0 0 0 0 0 0 0 0
57257- 0 0 0 0 0 0 0 0 0 0 0 0
57258- 0 0 0 0 0 0 0 0 0 0 0 0
57259- 0 0 0 0 0 0 0 0 0 0 0 0
57260- 0 0 0 0 0 0 0 0 0 0 0 0
57261- 0 0 0 0 0 0 0 0 0 0 0 0
57262- 0 0 0 10 10 10 26 26 26 66 66 66
57263- 82 82 82 2 2 6 38 38 38 6 6 6
57264- 14 14 14 210 210 210 253 253 253 253 253 253
57265-253 253 253 253 253 253 253 253 253 253 253 253
57266-253 253 253 253 253 253 246 246 246 242 242 242
57267-253 253 253 253 253 253 253 253 253 253 253 253
57268-253 253 253 253 253 253 253 253 253 253 253 253
57269-253 253 253 253 253 253 253 253 253 253 253 253
57270-253 253 253 253 253 253 253 253 253 253 253 253
57271-253 253 253 253 253 253 144 144 144 2 2 6
57272- 2 2 6 2 2 6 2 2 6 46 46 46
57273- 2 2 6 2 2 6 2 2 6 2 2 6
57274- 42 42 42 74 74 74 30 30 30 10 10 10
57275- 0 0 0 0 0 0 0 0 0 0 0 0
57276- 0 0 0 0 0 0 0 0 0 0 0 0
57277- 0 0 0 0 0 0 0 0 0 0 0 0
57278- 0 0 0 0 0 0 0 0 0 0 0 0
57279- 0 0 0 0 0 0 0 0 0 0 0 0
57280- 0 0 0 0 0 0 0 0 0 0 0 0
57281- 0 0 0 0 0 0 0 0 0 0 0 0
57282- 6 6 6 14 14 14 42 42 42 90 90 90
57283- 26 26 26 6 6 6 42 42 42 2 2 6
57284- 74 74 74 250 250 250 253 253 253 253 253 253
57285-253 253 253 253 253 253 253 253 253 253 253 253
57286-253 253 253 253 253 253 242 242 242 242 242 242
57287-253 253 253 253 253 253 253 253 253 253 253 253
57288-253 253 253 253 253 253 253 253 253 253 253 253
57289-253 253 253 253 253 253 253 253 253 253 253 253
57290-253 253 253 253 253 253 253 253 253 253 253 253
57291-253 253 253 253 253 253 182 182 182 2 2 6
57292- 2 2 6 2 2 6 2 2 6 46 46 46
57293- 2 2 6 2 2 6 2 2 6 2 2 6
57294- 10 10 10 86 86 86 38 38 38 10 10 10
57295- 0 0 0 0 0 0 0 0 0 0 0 0
57296- 0 0 0 0 0 0 0 0 0 0 0 0
57297- 0 0 0 0 0 0 0 0 0 0 0 0
57298- 0 0 0 0 0 0 0 0 0 0 0 0
57299- 0 0 0 0 0 0 0 0 0 0 0 0
57300- 0 0 0 0 0 0 0 0 0 0 0 0
57301- 0 0 0 0 0 0 0 0 0 0 0 0
57302- 10 10 10 26 26 26 66 66 66 82 82 82
57303- 2 2 6 22 22 22 18 18 18 2 2 6
57304-149 149 149 253 253 253 253 253 253 253 253 253
57305-253 253 253 253 253 253 253 253 253 253 253 253
57306-253 253 253 253 253 253 234 234 234 242 242 242
57307-253 253 253 253 253 253 253 253 253 253 253 253
57308-253 253 253 253 253 253 253 253 253 253 253 253
57309-253 253 253 253 253 253 253 253 253 253 253 253
57310-253 253 253 253 253 253 253 253 253 253 253 253
57311-253 253 253 253 253 253 206 206 206 2 2 6
57312- 2 2 6 2 2 6 2 2 6 38 38 38
57313- 2 2 6 2 2 6 2 2 6 2 2 6
57314- 6 6 6 86 86 86 46 46 46 14 14 14
57315- 0 0 0 0 0 0 0 0 0 0 0 0
57316- 0 0 0 0 0 0 0 0 0 0 0 0
57317- 0 0 0 0 0 0 0 0 0 0 0 0
57318- 0 0 0 0 0 0 0 0 0 0 0 0
57319- 0 0 0 0 0 0 0 0 0 0 0 0
57320- 0 0 0 0 0 0 0 0 0 0 0 0
57321- 0 0 0 0 0 0 0 0 0 6 6 6
57322- 18 18 18 46 46 46 86 86 86 18 18 18
57323- 2 2 6 34 34 34 10 10 10 6 6 6
57324-210 210 210 253 253 253 253 253 253 253 253 253
57325-253 253 253 253 253 253 253 253 253 253 253 253
57326-253 253 253 253 253 253 234 234 234 242 242 242
57327-253 253 253 253 253 253 253 253 253 253 253 253
57328-253 253 253 253 253 253 253 253 253 253 253 253
57329-253 253 253 253 253 253 253 253 253 253 253 253
57330-253 253 253 253 253 253 253 253 253 253 253 253
57331-253 253 253 253 253 253 221 221 221 6 6 6
57332- 2 2 6 2 2 6 6 6 6 30 30 30
57333- 2 2 6 2 2 6 2 2 6 2 2 6
57334- 2 2 6 82 82 82 54 54 54 18 18 18
57335- 6 6 6 0 0 0 0 0 0 0 0 0
57336- 0 0 0 0 0 0 0 0 0 0 0 0
57337- 0 0 0 0 0 0 0 0 0 0 0 0
57338- 0 0 0 0 0 0 0 0 0 0 0 0
57339- 0 0 0 0 0 0 0 0 0 0 0 0
57340- 0 0 0 0 0 0 0 0 0 0 0 0
57341- 0 0 0 0 0 0 0 0 0 10 10 10
57342- 26 26 26 66 66 66 62 62 62 2 2 6
57343- 2 2 6 38 38 38 10 10 10 26 26 26
57344-238 238 238 253 253 253 253 253 253 253 253 253
57345-253 253 253 253 253 253 253 253 253 253 253 253
57346-253 253 253 253 253 253 231 231 231 238 238 238
57347-253 253 253 253 253 253 253 253 253 253 253 253
57348-253 253 253 253 253 253 253 253 253 253 253 253
57349-253 253 253 253 253 253 253 253 253 253 253 253
57350-253 253 253 253 253 253 253 253 253 253 253 253
57351-253 253 253 253 253 253 231 231 231 6 6 6
57352- 2 2 6 2 2 6 10 10 10 30 30 30
57353- 2 2 6 2 2 6 2 2 6 2 2 6
57354- 2 2 6 66 66 66 58 58 58 22 22 22
57355- 6 6 6 0 0 0 0 0 0 0 0 0
57356- 0 0 0 0 0 0 0 0 0 0 0 0
57357- 0 0 0 0 0 0 0 0 0 0 0 0
57358- 0 0 0 0 0 0 0 0 0 0 0 0
57359- 0 0 0 0 0 0 0 0 0 0 0 0
57360- 0 0 0 0 0 0 0 0 0 0 0 0
57361- 0 0 0 0 0 0 0 0 0 10 10 10
57362- 38 38 38 78 78 78 6 6 6 2 2 6
57363- 2 2 6 46 46 46 14 14 14 42 42 42
57364-246 246 246 253 253 253 253 253 253 253 253 253
57365-253 253 253 253 253 253 253 253 253 253 253 253
57366-253 253 253 253 253 253 231 231 231 242 242 242
57367-253 253 253 253 253 253 253 253 253 253 253 253
57368-253 253 253 253 253 253 253 253 253 253 253 253
57369-253 253 253 253 253 253 253 253 253 253 253 253
57370-253 253 253 253 253 253 253 253 253 253 253 253
57371-253 253 253 253 253 253 234 234 234 10 10 10
57372- 2 2 6 2 2 6 22 22 22 14 14 14
57373- 2 2 6 2 2 6 2 2 6 2 2 6
57374- 2 2 6 66 66 66 62 62 62 22 22 22
57375- 6 6 6 0 0 0 0 0 0 0 0 0
57376- 0 0 0 0 0 0 0 0 0 0 0 0
57377- 0 0 0 0 0 0 0 0 0 0 0 0
57378- 0 0 0 0 0 0 0 0 0 0 0 0
57379- 0 0 0 0 0 0 0 0 0 0 0 0
57380- 0 0 0 0 0 0 0 0 0 0 0 0
57381- 0 0 0 0 0 0 6 6 6 18 18 18
57382- 50 50 50 74 74 74 2 2 6 2 2 6
57383- 14 14 14 70 70 70 34 34 34 62 62 62
57384-250 250 250 253 253 253 253 253 253 253 253 253
57385-253 253 253 253 253 253 253 253 253 253 253 253
57386-253 253 253 253 253 253 231 231 231 246 246 246
57387-253 253 253 253 253 253 253 253 253 253 253 253
57388-253 253 253 253 253 253 253 253 253 253 253 253
57389-253 253 253 253 253 253 253 253 253 253 253 253
57390-253 253 253 253 253 253 253 253 253 253 253 253
57391-253 253 253 253 253 253 234 234 234 14 14 14
57392- 2 2 6 2 2 6 30 30 30 2 2 6
57393- 2 2 6 2 2 6 2 2 6 2 2 6
57394- 2 2 6 66 66 66 62 62 62 22 22 22
57395- 6 6 6 0 0 0 0 0 0 0 0 0
57396- 0 0 0 0 0 0 0 0 0 0 0 0
57397- 0 0 0 0 0 0 0 0 0 0 0 0
57398- 0 0 0 0 0 0 0 0 0 0 0 0
57399- 0 0 0 0 0 0 0 0 0 0 0 0
57400- 0 0 0 0 0 0 0 0 0 0 0 0
57401- 0 0 0 0 0 0 6 6 6 18 18 18
57402- 54 54 54 62 62 62 2 2 6 2 2 6
57403- 2 2 6 30 30 30 46 46 46 70 70 70
57404-250 250 250 253 253 253 253 253 253 253 253 253
57405-253 253 253 253 253 253 253 253 253 253 253 253
57406-253 253 253 253 253 253 231 231 231 246 246 246
57407-253 253 253 253 253 253 253 253 253 253 253 253
57408-253 253 253 253 253 253 253 253 253 253 253 253
57409-253 253 253 253 253 253 253 253 253 253 253 253
57410-253 253 253 253 253 253 253 253 253 253 253 253
57411-253 253 253 253 253 253 226 226 226 10 10 10
57412- 2 2 6 6 6 6 30 30 30 2 2 6
57413- 2 2 6 2 2 6 2 2 6 2 2 6
57414- 2 2 6 66 66 66 58 58 58 22 22 22
57415- 6 6 6 0 0 0 0 0 0 0 0 0
57416- 0 0 0 0 0 0 0 0 0 0 0 0
57417- 0 0 0 0 0 0 0 0 0 0 0 0
57418- 0 0 0 0 0 0 0 0 0 0 0 0
57419- 0 0 0 0 0 0 0 0 0 0 0 0
57420- 0 0 0 0 0 0 0 0 0 0 0 0
57421- 0 0 0 0 0 0 6 6 6 22 22 22
57422- 58 58 58 62 62 62 2 2 6 2 2 6
57423- 2 2 6 2 2 6 30 30 30 78 78 78
57424-250 250 250 253 253 253 253 253 253 253 253 253
57425-253 253 253 253 253 253 253 253 253 253 253 253
57426-253 253 253 253 253 253 231 231 231 246 246 246
57427-253 253 253 253 253 253 253 253 253 253 253 253
57428-253 253 253 253 253 253 253 253 253 253 253 253
57429-253 253 253 253 253 253 253 253 253 253 253 253
57430-253 253 253 253 253 253 253 253 253 253 253 253
57431-253 253 253 253 253 253 206 206 206 2 2 6
57432- 22 22 22 34 34 34 18 14 6 22 22 22
57433- 26 26 26 18 18 18 6 6 6 2 2 6
57434- 2 2 6 82 82 82 54 54 54 18 18 18
57435- 6 6 6 0 0 0 0 0 0 0 0 0
57436- 0 0 0 0 0 0 0 0 0 0 0 0
57437- 0 0 0 0 0 0 0 0 0 0 0 0
57438- 0 0 0 0 0 0 0 0 0 0 0 0
57439- 0 0 0 0 0 0 0 0 0 0 0 0
57440- 0 0 0 0 0 0 0 0 0 0 0 0
57441- 0 0 0 0 0 0 6 6 6 26 26 26
57442- 62 62 62 106 106 106 74 54 14 185 133 11
57443-210 162 10 121 92 8 6 6 6 62 62 62
57444-238 238 238 253 253 253 253 253 253 253 253 253
57445-253 253 253 253 253 253 253 253 253 253 253 253
57446-253 253 253 253 253 253 231 231 231 246 246 246
57447-253 253 253 253 253 253 253 253 253 253 253 253
57448-253 253 253 253 253 253 253 253 253 253 253 253
57449-253 253 253 253 253 253 253 253 253 253 253 253
57450-253 253 253 253 253 253 253 253 253 253 253 253
57451-253 253 253 253 253 253 158 158 158 18 18 18
57452- 14 14 14 2 2 6 2 2 6 2 2 6
57453- 6 6 6 18 18 18 66 66 66 38 38 38
57454- 6 6 6 94 94 94 50 50 50 18 18 18
57455- 6 6 6 0 0 0 0 0 0 0 0 0
57456- 0 0 0 0 0 0 0 0 0 0 0 0
57457- 0 0 0 0 0 0 0 0 0 0 0 0
57458- 0 0 0 0 0 0 0 0 0 0 0 0
57459- 0 0 0 0 0 0 0 0 0 0 0 0
57460- 0 0 0 0 0 0 0 0 0 6 6 6
57461- 10 10 10 10 10 10 18 18 18 38 38 38
57462- 78 78 78 142 134 106 216 158 10 242 186 14
57463-246 190 14 246 190 14 156 118 10 10 10 10
57464- 90 90 90 238 238 238 253 253 253 253 253 253
57465-253 253 253 253 253 253 253 253 253 253 253 253
57466-253 253 253 253 253 253 231 231 231 250 250 250
57467-253 253 253 253 253 253 253 253 253 253 253 253
57468-253 253 253 253 253 253 253 253 253 253 253 253
57469-253 253 253 253 253 253 253 253 253 253 253 253
57470-253 253 253 253 253 253 253 253 253 246 230 190
57471-238 204 91 238 204 91 181 142 44 37 26 9
57472- 2 2 6 2 2 6 2 2 6 2 2 6
57473- 2 2 6 2 2 6 38 38 38 46 46 46
57474- 26 26 26 106 106 106 54 54 54 18 18 18
57475- 6 6 6 0 0 0 0 0 0 0 0 0
57476- 0 0 0 0 0 0 0 0 0 0 0 0
57477- 0 0 0 0 0 0 0 0 0 0 0 0
57478- 0 0 0 0 0 0 0 0 0 0 0 0
57479- 0 0 0 0 0 0 0 0 0 0 0 0
57480- 0 0 0 6 6 6 14 14 14 22 22 22
57481- 30 30 30 38 38 38 50 50 50 70 70 70
57482-106 106 106 190 142 34 226 170 11 242 186 14
57483-246 190 14 246 190 14 246 190 14 154 114 10
57484- 6 6 6 74 74 74 226 226 226 253 253 253
57485-253 253 253 253 253 253 253 253 253 253 253 253
57486-253 253 253 253 253 253 231 231 231 250 250 250
57487-253 253 253 253 253 253 253 253 253 253 253 253
57488-253 253 253 253 253 253 253 253 253 253 253 253
57489-253 253 253 253 253 253 253 253 253 253 253 253
57490-253 253 253 253 253 253 253 253 253 228 184 62
57491-241 196 14 241 208 19 232 195 16 38 30 10
57492- 2 2 6 2 2 6 2 2 6 2 2 6
57493- 2 2 6 6 6 6 30 30 30 26 26 26
57494-203 166 17 154 142 90 66 66 66 26 26 26
57495- 6 6 6 0 0 0 0 0 0 0 0 0
57496- 0 0 0 0 0 0 0 0 0 0 0 0
57497- 0 0 0 0 0 0 0 0 0 0 0 0
57498- 0 0 0 0 0 0 0 0 0 0 0 0
57499- 0 0 0 0 0 0 0 0 0 0 0 0
57500- 6 6 6 18 18 18 38 38 38 58 58 58
57501- 78 78 78 86 86 86 101 101 101 123 123 123
57502-175 146 61 210 150 10 234 174 13 246 186 14
57503-246 190 14 246 190 14 246 190 14 238 190 10
57504-102 78 10 2 2 6 46 46 46 198 198 198
57505-253 253 253 253 253 253 253 253 253 253 253 253
57506-253 253 253 253 253 253 234 234 234 242 242 242
57507-253 253 253 253 253 253 253 253 253 253 253 253
57508-253 253 253 253 253 253 253 253 253 253 253 253
57509-253 253 253 253 253 253 253 253 253 253 253 253
57510-253 253 253 253 253 253 253 253 253 224 178 62
57511-242 186 14 241 196 14 210 166 10 22 18 6
57512- 2 2 6 2 2 6 2 2 6 2 2 6
57513- 2 2 6 2 2 6 6 6 6 121 92 8
57514-238 202 15 232 195 16 82 82 82 34 34 34
57515- 10 10 10 0 0 0 0 0 0 0 0 0
57516- 0 0 0 0 0 0 0 0 0 0 0 0
57517- 0 0 0 0 0 0 0 0 0 0 0 0
57518- 0 0 0 0 0 0 0 0 0 0 0 0
57519- 0 0 0 0 0 0 0 0 0 0 0 0
57520- 14 14 14 38 38 38 70 70 70 154 122 46
57521-190 142 34 200 144 11 197 138 11 197 138 11
57522-213 154 11 226 170 11 242 186 14 246 190 14
57523-246 190 14 246 190 14 246 190 14 246 190 14
57524-225 175 15 46 32 6 2 2 6 22 22 22
57525-158 158 158 250 250 250 253 253 253 253 253 253
57526-253 253 253 253 253 253 253 253 253 253 253 253
57527-253 253 253 253 253 253 253 253 253 253 253 253
57528-253 253 253 253 253 253 253 253 253 253 253 253
57529-253 253 253 253 253 253 253 253 253 253 253 253
57530-253 253 253 250 250 250 242 242 242 224 178 62
57531-239 182 13 236 186 11 213 154 11 46 32 6
57532- 2 2 6 2 2 6 2 2 6 2 2 6
57533- 2 2 6 2 2 6 61 42 6 225 175 15
57534-238 190 10 236 186 11 112 100 78 42 42 42
57535- 14 14 14 0 0 0 0 0 0 0 0 0
57536- 0 0 0 0 0 0 0 0 0 0 0 0
57537- 0 0 0 0 0 0 0 0 0 0 0 0
57538- 0 0 0 0 0 0 0 0 0 0 0 0
57539- 0 0 0 0 0 0 0 0 0 6 6 6
57540- 22 22 22 54 54 54 154 122 46 213 154 11
57541-226 170 11 230 174 11 226 170 11 226 170 11
57542-236 178 12 242 186 14 246 190 14 246 190 14
57543-246 190 14 246 190 14 246 190 14 246 190 14
57544-241 196 14 184 144 12 10 10 10 2 2 6
57545- 6 6 6 116 116 116 242 242 242 253 253 253
57546-253 253 253 253 253 253 253 253 253 253 253 253
57547-253 253 253 253 253 253 253 253 253 253 253 253
57548-253 253 253 253 253 253 253 253 253 253 253 253
57549-253 253 253 253 253 253 253 253 253 253 253 253
57550-253 253 253 231 231 231 198 198 198 214 170 54
57551-236 178 12 236 178 12 210 150 10 137 92 6
57552- 18 14 6 2 2 6 2 2 6 2 2 6
57553- 6 6 6 70 47 6 200 144 11 236 178 12
57554-239 182 13 239 182 13 124 112 88 58 58 58
57555- 22 22 22 6 6 6 0 0 0 0 0 0
57556- 0 0 0 0 0 0 0 0 0 0 0 0
57557- 0 0 0 0 0 0 0 0 0 0 0 0
57558- 0 0 0 0 0 0 0 0 0 0 0 0
57559- 0 0 0 0 0 0 0 0 0 10 10 10
57560- 30 30 30 70 70 70 180 133 36 226 170 11
57561-239 182 13 242 186 14 242 186 14 246 186 14
57562-246 190 14 246 190 14 246 190 14 246 190 14
57563-246 190 14 246 190 14 246 190 14 246 190 14
57564-246 190 14 232 195 16 98 70 6 2 2 6
57565- 2 2 6 2 2 6 66 66 66 221 221 221
57566-253 253 253 253 253 253 253 253 253 253 253 253
57567-253 253 253 253 253 253 253 253 253 253 253 253
57568-253 253 253 253 253 253 253 253 253 253 253 253
57569-253 253 253 253 253 253 253 253 253 253 253 253
57570-253 253 253 206 206 206 198 198 198 214 166 58
57571-230 174 11 230 174 11 216 158 10 192 133 9
57572-163 110 8 116 81 8 102 78 10 116 81 8
57573-167 114 7 197 138 11 226 170 11 239 182 13
57574-242 186 14 242 186 14 162 146 94 78 78 78
57575- 34 34 34 14 14 14 6 6 6 0 0 0
57576- 0 0 0 0 0 0 0 0 0 0 0 0
57577- 0 0 0 0 0 0 0 0 0 0 0 0
57578- 0 0 0 0 0 0 0 0 0 0 0 0
57579- 0 0 0 0 0 0 0 0 0 6 6 6
57580- 30 30 30 78 78 78 190 142 34 226 170 11
57581-239 182 13 246 190 14 246 190 14 246 190 14
57582-246 190 14 246 190 14 246 190 14 246 190 14
57583-246 190 14 246 190 14 246 190 14 246 190 14
57584-246 190 14 241 196 14 203 166 17 22 18 6
57585- 2 2 6 2 2 6 2 2 6 38 38 38
57586-218 218 218 253 253 253 253 253 253 253 253 253
57587-253 253 253 253 253 253 253 253 253 253 253 253
57588-253 253 253 253 253 253 253 253 253 253 253 253
57589-253 253 253 253 253 253 253 253 253 253 253 253
57590-250 250 250 206 206 206 198 198 198 202 162 69
57591-226 170 11 236 178 12 224 166 10 210 150 10
57592-200 144 11 197 138 11 192 133 9 197 138 11
57593-210 150 10 226 170 11 242 186 14 246 190 14
57594-246 190 14 246 186 14 225 175 15 124 112 88
57595- 62 62 62 30 30 30 14 14 14 6 6 6
57596- 0 0 0 0 0 0 0 0 0 0 0 0
57597- 0 0 0 0 0 0 0 0 0 0 0 0
57598- 0 0 0 0 0 0 0 0 0 0 0 0
57599- 0 0 0 0 0 0 0 0 0 10 10 10
57600- 30 30 30 78 78 78 174 135 50 224 166 10
57601-239 182 13 246 190 14 246 190 14 246 190 14
57602-246 190 14 246 190 14 246 190 14 246 190 14
57603-246 190 14 246 190 14 246 190 14 246 190 14
57604-246 190 14 246 190 14 241 196 14 139 102 15
57605- 2 2 6 2 2 6 2 2 6 2 2 6
57606- 78 78 78 250 250 250 253 253 253 253 253 253
57607-253 253 253 253 253 253 253 253 253 253 253 253
57608-253 253 253 253 253 253 253 253 253 253 253 253
57609-253 253 253 253 253 253 253 253 253 253 253 253
57610-250 250 250 214 214 214 198 198 198 190 150 46
57611-219 162 10 236 178 12 234 174 13 224 166 10
57612-216 158 10 213 154 11 213 154 11 216 158 10
57613-226 170 11 239 182 13 246 190 14 246 190 14
57614-246 190 14 246 190 14 242 186 14 206 162 42
57615-101 101 101 58 58 58 30 30 30 14 14 14
57616- 6 6 6 0 0 0 0 0 0 0 0 0
57617- 0 0 0 0 0 0 0 0 0 0 0 0
57618- 0 0 0 0 0 0 0 0 0 0 0 0
57619- 0 0 0 0 0 0 0 0 0 10 10 10
57620- 30 30 30 74 74 74 174 135 50 216 158 10
57621-236 178 12 246 190 14 246 190 14 246 190 14
57622-246 190 14 246 190 14 246 190 14 246 190 14
57623-246 190 14 246 190 14 246 190 14 246 190 14
57624-246 190 14 246 190 14 241 196 14 226 184 13
57625- 61 42 6 2 2 6 2 2 6 2 2 6
57626- 22 22 22 238 238 238 253 253 253 253 253 253
57627-253 253 253 253 253 253 253 253 253 253 253 253
57628-253 253 253 253 253 253 253 253 253 253 253 253
57629-253 253 253 253 253 253 253 253 253 253 253 253
57630-253 253 253 226 226 226 187 187 187 180 133 36
57631-216 158 10 236 178 12 239 182 13 236 178 12
57632-230 174 11 226 170 11 226 170 11 230 174 11
57633-236 178 12 242 186 14 246 190 14 246 190 14
57634-246 190 14 246 190 14 246 186 14 239 182 13
57635-206 162 42 106 106 106 66 66 66 34 34 34
57636- 14 14 14 6 6 6 0 0 0 0 0 0
57637- 0 0 0 0 0 0 0 0 0 0 0 0
57638- 0 0 0 0 0 0 0 0 0 0 0 0
57639- 0 0 0 0 0 0 0 0 0 6 6 6
57640- 26 26 26 70 70 70 163 133 67 213 154 11
57641-236 178 12 246 190 14 246 190 14 246 190 14
57642-246 190 14 246 190 14 246 190 14 246 190 14
57643-246 190 14 246 190 14 246 190 14 246 190 14
57644-246 190 14 246 190 14 246 190 14 241 196 14
57645-190 146 13 18 14 6 2 2 6 2 2 6
57646- 46 46 46 246 246 246 253 253 253 253 253 253
57647-253 253 253 253 253 253 253 253 253 253 253 253
57648-253 253 253 253 253 253 253 253 253 253 253 253
57649-253 253 253 253 253 253 253 253 253 253 253 253
57650-253 253 253 221 221 221 86 86 86 156 107 11
57651-216 158 10 236 178 12 242 186 14 246 186 14
57652-242 186 14 239 182 13 239 182 13 242 186 14
57653-242 186 14 246 186 14 246 190 14 246 190 14
57654-246 190 14 246 190 14 246 190 14 246 190 14
57655-242 186 14 225 175 15 142 122 72 66 66 66
57656- 30 30 30 10 10 10 0 0 0 0 0 0
57657- 0 0 0 0 0 0 0 0 0 0 0 0
57658- 0 0 0 0 0 0 0 0 0 0 0 0
57659- 0 0 0 0 0 0 0 0 0 6 6 6
57660- 26 26 26 70 70 70 163 133 67 210 150 10
57661-236 178 12 246 190 14 246 190 14 246 190 14
57662-246 190 14 246 190 14 246 190 14 246 190 14
57663-246 190 14 246 190 14 246 190 14 246 190 14
57664-246 190 14 246 190 14 246 190 14 246 190 14
57665-232 195 16 121 92 8 34 34 34 106 106 106
57666-221 221 221 253 253 253 253 253 253 253 253 253
57667-253 253 253 253 253 253 253 253 253 253 253 253
57668-253 253 253 253 253 253 253 253 253 253 253 253
57669-253 253 253 253 253 253 253 253 253 253 253 253
57670-242 242 242 82 82 82 18 14 6 163 110 8
57671-216 158 10 236 178 12 242 186 14 246 190 14
57672-246 190 14 246 190 14 246 190 14 246 190 14
57673-246 190 14 246 190 14 246 190 14 246 190 14
57674-246 190 14 246 190 14 246 190 14 246 190 14
57675-246 190 14 246 190 14 242 186 14 163 133 67
57676- 46 46 46 18 18 18 6 6 6 0 0 0
57677- 0 0 0 0 0 0 0 0 0 0 0 0
57678- 0 0 0 0 0 0 0 0 0 0 0 0
57679- 0 0 0 0 0 0 0 0 0 10 10 10
57680- 30 30 30 78 78 78 163 133 67 210 150 10
57681-236 178 12 246 186 14 246 190 14 246 190 14
57682-246 190 14 246 190 14 246 190 14 246 190 14
57683-246 190 14 246 190 14 246 190 14 246 190 14
57684-246 190 14 246 190 14 246 190 14 246 190 14
57685-241 196 14 215 174 15 190 178 144 253 253 253
57686-253 253 253 253 253 253 253 253 253 253 253 253
57687-253 253 253 253 253 253 253 253 253 253 253 253
57688-253 253 253 253 253 253 253 253 253 253 253 253
57689-253 253 253 253 253 253 253 253 253 218 218 218
57690- 58 58 58 2 2 6 22 18 6 167 114 7
57691-216 158 10 236 178 12 246 186 14 246 190 14
57692-246 190 14 246 190 14 246 190 14 246 190 14
57693-246 190 14 246 190 14 246 190 14 246 190 14
57694-246 190 14 246 190 14 246 190 14 246 190 14
57695-246 190 14 246 186 14 242 186 14 190 150 46
57696- 54 54 54 22 22 22 6 6 6 0 0 0
57697- 0 0 0 0 0 0 0 0 0 0 0 0
57698- 0 0 0 0 0 0 0 0 0 0 0 0
57699- 0 0 0 0 0 0 0 0 0 14 14 14
57700- 38 38 38 86 86 86 180 133 36 213 154 11
57701-236 178 12 246 186 14 246 190 14 246 190 14
57702-246 190 14 246 190 14 246 190 14 246 190 14
57703-246 190 14 246 190 14 246 190 14 246 190 14
57704-246 190 14 246 190 14 246 190 14 246 190 14
57705-246 190 14 232 195 16 190 146 13 214 214 214
57706-253 253 253 253 253 253 253 253 253 253 253 253
57707-253 253 253 253 253 253 253 253 253 253 253 253
57708-253 253 253 253 253 253 253 253 253 253 253 253
57709-253 253 253 250 250 250 170 170 170 26 26 26
57710- 2 2 6 2 2 6 37 26 9 163 110 8
57711-219 162 10 239 182 13 246 186 14 246 190 14
57712-246 190 14 246 190 14 246 190 14 246 190 14
57713-246 190 14 246 190 14 246 190 14 246 190 14
57714-246 190 14 246 190 14 246 190 14 246 190 14
57715-246 186 14 236 178 12 224 166 10 142 122 72
57716- 46 46 46 18 18 18 6 6 6 0 0 0
57717- 0 0 0 0 0 0 0 0 0 0 0 0
57718- 0 0 0 0 0 0 0 0 0 0 0 0
57719- 0 0 0 0 0 0 6 6 6 18 18 18
57720- 50 50 50 109 106 95 192 133 9 224 166 10
57721-242 186 14 246 190 14 246 190 14 246 190 14
57722-246 190 14 246 190 14 246 190 14 246 190 14
57723-246 190 14 246 190 14 246 190 14 246 190 14
57724-246 190 14 246 190 14 246 190 14 246 190 14
57725-242 186 14 226 184 13 210 162 10 142 110 46
57726-226 226 226 253 253 253 253 253 253 253 253 253
57727-253 253 253 253 253 253 253 253 253 253 253 253
57728-253 253 253 253 253 253 253 253 253 253 253 253
57729-198 198 198 66 66 66 2 2 6 2 2 6
57730- 2 2 6 2 2 6 50 34 6 156 107 11
57731-219 162 10 239 182 13 246 186 14 246 190 14
57732-246 190 14 246 190 14 246 190 14 246 190 14
57733-246 190 14 246 190 14 246 190 14 246 190 14
57734-246 190 14 246 190 14 246 190 14 242 186 14
57735-234 174 13 213 154 11 154 122 46 66 66 66
57736- 30 30 30 10 10 10 0 0 0 0 0 0
57737- 0 0 0 0 0 0 0 0 0 0 0 0
57738- 0 0 0 0 0 0 0 0 0 0 0 0
57739- 0 0 0 0 0 0 6 6 6 22 22 22
57740- 58 58 58 154 121 60 206 145 10 234 174 13
57741-242 186 14 246 186 14 246 190 14 246 190 14
57742-246 190 14 246 190 14 246 190 14 246 190 14
57743-246 190 14 246 190 14 246 190 14 246 190 14
57744-246 190 14 246 190 14 246 190 14 246 190 14
57745-246 186 14 236 178 12 210 162 10 163 110 8
57746- 61 42 6 138 138 138 218 218 218 250 250 250
57747-253 253 253 253 253 253 253 253 253 250 250 250
57748-242 242 242 210 210 210 144 144 144 66 66 66
57749- 6 6 6 2 2 6 2 2 6 2 2 6
57750- 2 2 6 2 2 6 61 42 6 163 110 8
57751-216 158 10 236 178 12 246 190 14 246 190 14
57752-246 190 14 246 190 14 246 190 14 246 190 14
57753-246 190 14 246 190 14 246 190 14 246 190 14
57754-246 190 14 239 182 13 230 174 11 216 158 10
57755-190 142 34 124 112 88 70 70 70 38 38 38
57756- 18 18 18 6 6 6 0 0 0 0 0 0
57757- 0 0 0 0 0 0 0 0 0 0 0 0
57758- 0 0 0 0 0 0 0 0 0 0 0 0
57759- 0 0 0 0 0 0 6 6 6 22 22 22
57760- 62 62 62 168 124 44 206 145 10 224 166 10
57761-236 178 12 239 182 13 242 186 14 242 186 14
57762-246 186 14 246 190 14 246 190 14 246 190 14
57763-246 190 14 246 190 14 246 190 14 246 190 14
57764-246 190 14 246 190 14 246 190 14 246 190 14
57765-246 190 14 236 178 12 216 158 10 175 118 6
57766- 80 54 7 2 2 6 6 6 6 30 30 30
57767- 54 54 54 62 62 62 50 50 50 38 38 38
57768- 14 14 14 2 2 6 2 2 6 2 2 6
57769- 2 2 6 2 2 6 2 2 6 2 2 6
57770- 2 2 6 6 6 6 80 54 7 167 114 7
57771-213 154 11 236 178 12 246 190 14 246 190 14
57772-246 190 14 246 190 14 246 190 14 246 190 14
57773-246 190 14 242 186 14 239 182 13 239 182 13
57774-230 174 11 210 150 10 174 135 50 124 112 88
57775- 82 82 82 54 54 54 34 34 34 18 18 18
57776- 6 6 6 0 0 0 0 0 0 0 0 0
57777- 0 0 0 0 0 0 0 0 0 0 0 0
57778- 0 0 0 0 0 0 0 0 0 0 0 0
57779- 0 0 0 0 0 0 6 6 6 18 18 18
57780- 50 50 50 158 118 36 192 133 9 200 144 11
57781-216 158 10 219 162 10 224 166 10 226 170 11
57782-230 174 11 236 178 12 239 182 13 239 182 13
57783-242 186 14 246 186 14 246 190 14 246 190 14
57784-246 190 14 246 190 14 246 190 14 246 190 14
57785-246 186 14 230 174 11 210 150 10 163 110 8
57786-104 69 6 10 10 10 2 2 6 2 2 6
57787- 2 2 6 2 2 6 2 2 6 2 2 6
57788- 2 2 6 2 2 6 2 2 6 2 2 6
57789- 2 2 6 2 2 6 2 2 6 2 2 6
57790- 2 2 6 6 6 6 91 60 6 167 114 7
57791-206 145 10 230 174 11 242 186 14 246 190 14
57792-246 190 14 246 190 14 246 186 14 242 186 14
57793-239 182 13 230 174 11 224 166 10 213 154 11
57794-180 133 36 124 112 88 86 86 86 58 58 58
57795- 38 38 38 22 22 22 10 10 10 6 6 6
57796- 0 0 0 0 0 0 0 0 0 0 0 0
57797- 0 0 0 0 0 0 0 0 0 0 0 0
57798- 0 0 0 0 0 0 0 0 0 0 0 0
57799- 0 0 0 0 0 0 0 0 0 14 14 14
57800- 34 34 34 70 70 70 138 110 50 158 118 36
57801-167 114 7 180 123 7 192 133 9 197 138 11
57802-200 144 11 206 145 10 213 154 11 219 162 10
57803-224 166 10 230 174 11 239 182 13 242 186 14
57804-246 186 14 246 186 14 246 186 14 246 186 14
57805-239 182 13 216 158 10 185 133 11 152 99 6
57806-104 69 6 18 14 6 2 2 6 2 2 6
57807- 2 2 6 2 2 6 2 2 6 2 2 6
57808- 2 2 6 2 2 6 2 2 6 2 2 6
57809- 2 2 6 2 2 6 2 2 6 2 2 6
57810- 2 2 6 6 6 6 80 54 7 152 99 6
57811-192 133 9 219 162 10 236 178 12 239 182 13
57812-246 186 14 242 186 14 239 182 13 236 178 12
57813-224 166 10 206 145 10 192 133 9 154 121 60
57814- 94 94 94 62 62 62 42 42 42 22 22 22
57815- 14 14 14 6 6 6 0 0 0 0 0 0
57816- 0 0 0 0 0 0 0 0 0 0 0 0
57817- 0 0 0 0 0 0 0 0 0 0 0 0
57818- 0 0 0 0 0 0 0 0 0 0 0 0
57819- 0 0 0 0 0 0 0 0 0 6 6 6
57820- 18 18 18 34 34 34 58 58 58 78 78 78
57821-101 98 89 124 112 88 142 110 46 156 107 11
57822-163 110 8 167 114 7 175 118 6 180 123 7
57823-185 133 11 197 138 11 210 150 10 219 162 10
57824-226 170 11 236 178 12 236 178 12 234 174 13
57825-219 162 10 197 138 11 163 110 8 130 83 6
57826- 91 60 6 10 10 10 2 2 6 2 2 6
57827- 18 18 18 38 38 38 38 38 38 38 38 38
57828- 38 38 38 38 38 38 38 38 38 38 38 38
57829- 38 38 38 38 38 38 26 26 26 2 2 6
57830- 2 2 6 6 6 6 70 47 6 137 92 6
57831-175 118 6 200 144 11 219 162 10 230 174 11
57832-234 174 13 230 174 11 219 162 10 210 150 10
57833-192 133 9 163 110 8 124 112 88 82 82 82
57834- 50 50 50 30 30 30 14 14 14 6 6 6
57835- 0 0 0 0 0 0 0 0 0 0 0 0
57836- 0 0 0 0 0 0 0 0 0 0 0 0
57837- 0 0 0 0 0 0 0 0 0 0 0 0
57838- 0 0 0 0 0 0 0 0 0 0 0 0
57839- 0 0 0 0 0 0 0 0 0 0 0 0
57840- 6 6 6 14 14 14 22 22 22 34 34 34
57841- 42 42 42 58 58 58 74 74 74 86 86 86
57842-101 98 89 122 102 70 130 98 46 121 87 25
57843-137 92 6 152 99 6 163 110 8 180 123 7
57844-185 133 11 197 138 11 206 145 10 200 144 11
57845-180 123 7 156 107 11 130 83 6 104 69 6
57846- 50 34 6 54 54 54 110 110 110 101 98 89
57847- 86 86 86 82 82 82 78 78 78 78 78 78
57848- 78 78 78 78 78 78 78 78 78 78 78 78
57849- 78 78 78 82 82 82 86 86 86 94 94 94
57850-106 106 106 101 101 101 86 66 34 124 80 6
57851-156 107 11 180 123 7 192 133 9 200 144 11
57852-206 145 10 200 144 11 192 133 9 175 118 6
57853-139 102 15 109 106 95 70 70 70 42 42 42
57854- 22 22 22 10 10 10 0 0 0 0 0 0
57855- 0 0 0 0 0 0 0 0 0 0 0 0
57856- 0 0 0 0 0 0 0 0 0 0 0 0
57857- 0 0 0 0 0 0 0 0 0 0 0 0
57858- 0 0 0 0 0 0 0 0 0 0 0 0
57859- 0 0 0 0 0 0 0 0 0 0 0 0
57860- 0 0 0 0 0 0 6 6 6 10 10 10
57861- 14 14 14 22 22 22 30 30 30 38 38 38
57862- 50 50 50 62 62 62 74 74 74 90 90 90
57863-101 98 89 112 100 78 121 87 25 124 80 6
57864-137 92 6 152 99 6 152 99 6 152 99 6
57865-138 86 6 124 80 6 98 70 6 86 66 30
57866-101 98 89 82 82 82 58 58 58 46 46 46
57867- 38 38 38 34 34 34 34 34 34 34 34 34
57868- 34 34 34 34 34 34 34 34 34 34 34 34
57869- 34 34 34 34 34 34 38 38 38 42 42 42
57870- 54 54 54 82 82 82 94 86 76 91 60 6
57871-134 86 6 156 107 11 167 114 7 175 118 6
57872-175 118 6 167 114 7 152 99 6 121 87 25
57873-101 98 89 62 62 62 34 34 34 18 18 18
57874- 6 6 6 0 0 0 0 0 0 0 0 0
57875- 0 0 0 0 0 0 0 0 0 0 0 0
57876- 0 0 0 0 0 0 0 0 0 0 0 0
57877- 0 0 0 0 0 0 0 0 0 0 0 0
57878- 0 0 0 0 0 0 0 0 0 0 0 0
57879- 0 0 0 0 0 0 0 0 0 0 0 0
57880- 0 0 0 0 0 0 0 0 0 0 0 0
57881- 0 0 0 6 6 6 6 6 6 10 10 10
57882- 18 18 18 22 22 22 30 30 30 42 42 42
57883- 50 50 50 66 66 66 86 86 86 101 98 89
57884-106 86 58 98 70 6 104 69 6 104 69 6
57885-104 69 6 91 60 6 82 62 34 90 90 90
57886- 62 62 62 38 38 38 22 22 22 14 14 14
57887- 10 10 10 10 10 10 10 10 10 10 10 10
57888- 10 10 10 10 10 10 6 6 6 10 10 10
57889- 10 10 10 10 10 10 10 10 10 14 14 14
57890- 22 22 22 42 42 42 70 70 70 89 81 66
57891- 80 54 7 104 69 6 124 80 6 137 92 6
57892-134 86 6 116 81 8 100 82 52 86 86 86
57893- 58 58 58 30 30 30 14 14 14 6 6 6
57894- 0 0 0 0 0 0 0 0 0 0 0 0
57895- 0 0 0 0 0 0 0 0 0 0 0 0
57896- 0 0 0 0 0 0 0 0 0 0 0 0
57897- 0 0 0 0 0 0 0 0 0 0 0 0
57898- 0 0 0 0 0 0 0 0 0 0 0 0
57899- 0 0 0 0 0 0 0 0 0 0 0 0
57900- 0 0 0 0 0 0 0 0 0 0 0 0
57901- 0 0 0 0 0 0 0 0 0 0 0 0
57902- 0 0 0 6 6 6 10 10 10 14 14 14
57903- 18 18 18 26 26 26 38 38 38 54 54 54
57904- 70 70 70 86 86 86 94 86 76 89 81 66
57905- 89 81 66 86 86 86 74 74 74 50 50 50
57906- 30 30 30 14 14 14 6 6 6 0 0 0
57907- 0 0 0 0 0 0 0 0 0 0 0 0
57908- 0 0 0 0 0 0 0 0 0 0 0 0
57909- 0 0 0 0 0 0 0 0 0 0 0 0
57910- 6 6 6 18 18 18 34 34 34 58 58 58
57911- 82 82 82 89 81 66 89 81 66 89 81 66
57912- 94 86 66 94 86 76 74 74 74 50 50 50
57913- 26 26 26 14 14 14 6 6 6 0 0 0
57914- 0 0 0 0 0 0 0 0 0 0 0 0
57915- 0 0 0 0 0 0 0 0 0 0 0 0
57916- 0 0 0 0 0 0 0 0 0 0 0 0
57917- 0 0 0 0 0 0 0 0 0 0 0 0
57918- 0 0 0 0 0 0 0 0 0 0 0 0
57919- 0 0 0 0 0 0 0 0 0 0 0 0
57920- 0 0 0 0 0 0 0 0 0 0 0 0
57921- 0 0 0 0 0 0 0 0 0 0 0 0
57922- 0 0 0 0 0 0 0 0 0 0 0 0
57923- 6 6 6 6 6 6 14 14 14 18 18 18
57924- 30 30 30 38 38 38 46 46 46 54 54 54
57925- 50 50 50 42 42 42 30 30 30 18 18 18
57926- 10 10 10 0 0 0 0 0 0 0 0 0
57927- 0 0 0 0 0 0 0 0 0 0 0 0
57928- 0 0 0 0 0 0 0 0 0 0 0 0
57929- 0 0 0 0 0 0 0 0 0 0 0 0
57930- 0 0 0 6 6 6 14 14 14 26 26 26
57931- 38 38 38 50 50 50 58 58 58 58 58 58
57932- 54 54 54 42 42 42 30 30 30 18 18 18
57933- 10 10 10 0 0 0 0 0 0 0 0 0
57934- 0 0 0 0 0 0 0 0 0 0 0 0
57935- 0 0 0 0 0 0 0 0 0 0 0 0
57936- 0 0 0 0 0 0 0 0 0 0 0 0
57937- 0 0 0 0 0 0 0 0 0 0 0 0
57938- 0 0 0 0 0 0 0 0 0 0 0 0
57939- 0 0 0 0 0 0 0 0 0 0 0 0
57940- 0 0 0 0 0 0 0 0 0 0 0 0
57941- 0 0 0 0 0 0 0 0 0 0 0 0
57942- 0 0 0 0 0 0 0 0 0 0 0 0
57943- 0 0 0 0 0 0 0 0 0 6 6 6
57944- 6 6 6 10 10 10 14 14 14 18 18 18
57945- 18 18 18 14 14 14 10 10 10 6 6 6
57946- 0 0 0 0 0 0 0 0 0 0 0 0
57947- 0 0 0 0 0 0 0 0 0 0 0 0
57948- 0 0 0 0 0 0 0 0 0 0 0 0
57949- 0 0 0 0 0 0 0 0 0 0 0 0
57950- 0 0 0 0 0 0 0 0 0 6 6 6
57951- 14 14 14 18 18 18 22 22 22 22 22 22
57952- 18 18 18 14 14 14 10 10 10 6 6 6
57953- 0 0 0 0 0 0 0 0 0 0 0 0
57954- 0 0 0 0 0 0 0 0 0 0 0 0
57955- 0 0 0 0 0 0 0 0 0 0 0 0
57956- 0 0 0 0 0 0 0 0 0 0 0 0
57957- 0 0 0 0 0 0 0 0 0 0 0 0
57958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57971+4 4 4 4 4 4
57972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 4 4 4 4
57986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57999+4 4 4 4 4 4
58000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58013+4 4 4 4 4 4
58014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58027+4 4 4 4 4 4
58028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58041+4 4 4 4 4 4
58042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58046+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58047+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58051+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58052+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58053+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58055+4 4 4 4 4 4
58056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58060+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58061+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58062+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58065+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58066+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58067+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58068+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58069+4 4 4 4 4 4
58070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58074+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58075+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58076+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58079+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58080+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58081+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58082+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58083+4 4 4 4 4 4
58084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58087+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58088+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58089+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58090+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58092+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58093+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58094+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58095+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58096+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58097+4 4 4 4 4 4
58098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58101+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58102+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58103+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58104+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58105+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58106+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58107+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58108+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58109+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58110+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58111+4 4 4 4 4 4
58112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58115+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58116+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58117+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58118+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58119+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58120+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58121+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58122+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58123+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58124+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58125+4 4 4 4 4 4
58126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58128+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58129+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58130+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58131+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58132+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58133+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58134+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58135+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58136+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58137+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58138+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58139+4 4 4 4 4 4
58140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58142+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58143+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58144+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58145+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58146+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58147+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58148+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58149+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58150+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58151+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58152+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58153+4 4 4 4 4 4
58154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58156+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58157+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58158+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58159+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58160+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58161+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58162+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58163+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58164+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58165+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58166+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58167+4 4 4 4 4 4
58168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58170+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58171+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58172+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58173+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58174+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58175+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58176+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58177+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58178+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58179+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58180+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58181+4 4 4 4 4 4
58182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58183+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58184+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58185+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58186+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58187+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58188+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58189+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58190+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58191+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58192+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58193+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58194+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58195+4 4 4 4 4 4
58196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58197+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58198+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58199+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58200+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58201+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58202+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58203+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58204+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58205+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58206+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58207+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58208+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58209+0 0 0 4 4 4
58210+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58211+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58212+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58213+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58214+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58215+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58216+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58217+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58218+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58219+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58220+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58221+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58222+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58223+2 0 0 0 0 0
58224+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58225+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58226+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58227+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58228+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58229+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58230+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58231+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58232+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58233+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58234+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58235+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58236+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58237+37 38 37 0 0 0
58238+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58239+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58240+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58241+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58242+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58243+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58244+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58245+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58246+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58247+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58248+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58249+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58250+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58251+85 115 134 4 0 0
58252+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58253+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58254+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58255+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58256+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58257+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58258+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58259+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58260+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58261+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58262+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58263+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58264+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58265+60 73 81 4 0 0
58266+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58267+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58268+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58269+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58270+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58271+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58272+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58273+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58274+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58275+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58276+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58277+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58278+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58279+16 19 21 4 0 0
58280+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58281+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58282+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58283+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58284+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58285+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58286+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58287+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58288+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58289+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58290+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58291+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58292+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58293+4 0 0 4 3 3
58294+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58295+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58296+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58298+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58299+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58300+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58301+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58302+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58303+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58304+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58305+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58306+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58307+3 2 2 4 4 4
58308+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58309+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58310+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58311+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58312+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58313+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58314+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58315+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58316+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58317+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58318+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58319+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58320+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58321+4 4 4 4 4 4
58322+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58323+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58324+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58325+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58326+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58327+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58328+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58329+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58330+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58331+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58332+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58333+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58334+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58335+4 4 4 4 4 4
58336+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58337+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58338+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58339+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58340+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58341+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58342+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58343+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58344+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58345+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58346+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58347+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58348+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58349+5 5 5 5 5 5
58350+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58351+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58352+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58353+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58354+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58355+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58356+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58357+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58358+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58359+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58360+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58361+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58362+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58363+5 5 5 4 4 4
58364+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58365+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58366+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58367+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58368+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58369+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58370+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58371+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58372+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58373+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58374+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58375+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58377+4 4 4 4 4 4
58378+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58379+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58380+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58381+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58382+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58383+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58384+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58385+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58386+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58387+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58388+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58389+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58391+4 4 4 4 4 4
58392+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58393+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58394+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58395+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58396+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58397+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58398+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58399+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58400+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58401+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58402+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58405+4 4 4 4 4 4
58406+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58407+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58408+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58409+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58410+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58411+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58412+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58413+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58414+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58415+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58416+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58419+4 4 4 4 4 4
58420+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58421+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58422+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58423+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58424+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58425+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58426+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58427+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58428+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58429+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58430+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58433+4 4 4 4 4 4
58434+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58435+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58436+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58437+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58438+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58439+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58440+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58441+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58442+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58443+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58444+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4
58448+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58449+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58450+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58451+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58452+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58453+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58454+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58455+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58456+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58457+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58458+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4
58462+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58463+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58464+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58465+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58466+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58467+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58468+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58469+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58470+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58471+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58472+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4
58476+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58477+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58478+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58479+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58480+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58481+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58482+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58483+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58484+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58485+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58486+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4
58490+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58491+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58492+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58493+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58494+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58495+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58496+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58497+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58498+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58499+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58500+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4
58504+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58505+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58506+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58507+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58508+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58509+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58510+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58511+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58512+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58513+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58514+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58517+4 4 4 4 4 4
58518+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58519+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58520+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58521+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58522+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58523+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58524+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58525+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58526+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58527+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58528+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58531+4 4 4 4 4 4
58532+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58533+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58534+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58535+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58536+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58537+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58538+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58539+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58540+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58541+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58542+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58545+4 4 4 4 4 4
58546+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58547+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58548+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58549+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58550+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58551+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58552+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58553+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58554+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58555+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58556+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58559+4 4 4 4 4 4
58560+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58561+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58562+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58563+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58564+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58565+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58566+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58567+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58568+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58569+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58570+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58573+4 4 4 4 4 4
58574+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58575+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58576+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58577+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58578+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58579+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58580+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58581+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58582+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58583+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58584+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58587+4 4 4 4 4 4
58588+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58589+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58590+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58591+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58592+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58593+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58594+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58595+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58596+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58597+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58598+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58601+4 4 4 4 4 4
58602+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58603+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58604+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58605+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58606+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58607+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58608+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58609+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58610+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58611+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58612+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58615+4 4 4 4 4 4
58616+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58617+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
58618+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58619+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
58620+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
58621+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
58622+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
58623+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
58624+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58625+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58626+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58629+4 4 4 4 4 4
58630+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58631+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
58632+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
58633+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
58634+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
58635+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
58636+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58637+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
58638+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58639+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58640+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58643+4 4 4 4 4 4
58644+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58645+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
58646+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
58647+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58648+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
58649+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
58650+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58651+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
58652+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58653+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58654+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58657+4 4 4 4 4 4
58658+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58659+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
58660+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
58661+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
58662+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
58663+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
58664+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
58665+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
58666+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
58667+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58668+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58671+4 4 4 4 4 4
58672+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58673+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
58674+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
58675+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
58676+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
58677+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
58678+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
58679+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
58680+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
58681+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58682+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58685+4 4 4 4 4 4
58686+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
58687+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
58688+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
58689+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
58690+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58691+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
58692+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
58693+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
58694+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
58695+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58696+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58699+4 4 4 4 4 4
58700+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58701+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
58702+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
58703+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
58704+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
58705+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
58706+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
58707+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
58708+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
58709+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58710+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58713+4 4 4 4 4 4
58714+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
58715+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
58716+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
58717+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
58718+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
58719+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
58720+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
58721+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
58722+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
58723+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
58724+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58727+4 4 4 4 4 4
58728+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58729+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58730+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58731+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58732+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58733+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58734+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58735+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58736+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58737+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58738+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58741+4 4 4 4 4 4
58742+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58743+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58744+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58745+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58746+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58747+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58748+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58749+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58750+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58751+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58752+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58755+4 4 4 4 4 4
58756+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58757+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58758+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58759+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58760+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58761+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58762+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58763+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58764+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58765+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58766+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58769+4 4 4 4 4 4
58770+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58771+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58772+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58773+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58774+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58775+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58776+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58777+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58778+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58779+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58783+4 4 4 4 4 4
58784+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58785+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58786+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58787+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58788+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58789+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58790+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58791+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58792+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58793+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58797+4 4 4 4 4 4
58798+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58799+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58800+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58801+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58802+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58803+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58804+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58805+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58806+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58807+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58811+4 4 4 4 4 4
58812+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58813+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58814+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58815+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58816+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58817+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58818+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58819+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58820+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58821+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58825+4 4 4 4 4 4
58826+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58827+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58828+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58829+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58830+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58831+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58832+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58833+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58834+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58839+4 4 4 4 4 4
58840+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58841+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58842+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58843+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58844+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58845+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58846+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58847+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58848+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58853+4 4 4 4 4 4
58854+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58855+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58856+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58857+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58858+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58859+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58860+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58861+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58862+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58867+4 4 4 4 4 4
58868+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
58869+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
58870+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58871+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
58872+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
58873+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
58874+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
58875+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
58876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58881+4 4 4 4 4 4
58882+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58883+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
58884+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
58885+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
58886+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
58887+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
58888+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
58889+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58895+4 4 4 4 4 4
58896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58897+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
58898+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58899+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
58900+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
58901+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
58902+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
58903+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
58904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58909+4 4 4 4 4 4
58910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58911+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
58912+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
58913+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
58914+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
58915+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
58916+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
58917+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
58918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58923+4 4 4 4 4 4
58924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58925+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58926+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
58927+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58928+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
58929+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
58930+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
58931+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58937+4 4 4 4 4 4
58938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58940+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58941+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
58942+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
58943+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
58944+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
58945+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58951+4 4 4 4 4 4
58952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58955+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58956+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
58957+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
58958+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
58959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58965+4 4 4 4 4 4
58966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58969+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58970+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58971+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58972+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58979+4 4 4 4 4 4
58980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58983+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58984+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58985+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58986+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58993+4 4 4 4 4 4
58994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58997+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
58998+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
58999+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59000+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59007+4 4 4 4 4 4
59008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59012+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59013+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59014+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59021+4 4 4 4 4 4
59022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59026+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59027+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59028+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59035+4 4 4 4 4 4
59036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59040+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59041+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59042+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59049+4 4 4 4 4 4
59050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59054+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59055+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59063+4 4 4 4 4 4
59064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59068+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59069+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59077+4 4 4 4 4 4
59078diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59079index fef20db..d28b1ab 100644
59080--- a/drivers/xen/xenfs/xenstored.c
59081+++ b/drivers/xen/xenfs/xenstored.c
59082@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59083 static int xsd_kva_open(struct inode *inode, struct file *file)
59084 {
59085 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59086+#ifdef CONFIG_GRKERNSEC_HIDESYM
59087+ NULL);
59088+#else
59089 xen_store_interface);
59090+#endif
59091+
59092 if (!file->private_data)
59093 return -ENOMEM;
59094 return 0;
59095diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59096index cc1cfae..41158ad 100644
59097--- a/fs/9p/vfs_addr.c
59098+++ b/fs/9p/vfs_addr.c
59099@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59100
59101 retval = v9fs_file_write_internal(inode,
59102 v9inode->writeback_fid,
59103- (__force const char __user *)buffer,
59104+ (const char __force_user *)buffer,
59105 len, &offset, 0);
59106 if (retval > 0)
59107 retval = 0;
59108diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59109index 7fa4f7a..a7ebf8c 100644
59110--- a/fs/9p/vfs_inode.c
59111+++ b/fs/9p/vfs_inode.c
59112@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59113 void
59114 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59115 {
59116- char *s = nd_get_link(nd);
59117+ const char *s = nd_get_link(nd);
59118
59119 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59120 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59121diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59122index 370b24c..ff0be7b 100644
59123--- a/fs/Kconfig.binfmt
59124+++ b/fs/Kconfig.binfmt
59125@@ -103,7 +103,7 @@ config HAVE_AOUT
59126
59127 config BINFMT_AOUT
59128 tristate "Kernel support for a.out and ECOFF binaries"
59129- depends on HAVE_AOUT
59130+ depends on HAVE_AOUT && BROKEN
59131 ---help---
59132 A.out (Assembler.OUTput) is a set of formats for libraries and
59133 executables used in the earliest versions of UNIX. Linux used
59134diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59135index 2946712..f737435 100644
59136--- a/fs/afs/inode.c
59137+++ b/fs/afs/inode.c
59138@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59139 struct afs_vnode *vnode;
59140 struct super_block *sb;
59141 struct inode *inode;
59142- static atomic_t afs_autocell_ino;
59143+ static atomic_unchecked_t afs_autocell_ino;
59144
59145 _enter("{%x:%u},%*.*s,",
59146 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59147@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59148 data.fid.unique = 0;
59149 data.fid.vnode = 0;
59150
59151- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59152+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59153 afs_iget5_autocell_test, afs_iget5_set,
59154 &data);
59155 if (!inode) {
59156diff --git a/fs/aio.c b/fs/aio.c
59157index 7337500..2058af6 100644
59158--- a/fs/aio.c
59159+++ b/fs/aio.c
59160@@ -380,7 +380,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59161 size += sizeof(struct io_event) * nr_events;
59162
59163 nr_pages = PFN_UP(size);
59164- if (nr_pages < 0)
59165+ if (nr_pages <= 0)
59166 return -EINVAL;
59167
59168 file = aio_private_file(ctx, nr_pages);
59169diff --git a/fs/attr.c b/fs/attr.c
59170index 6530ced..4a827e2 100644
59171--- a/fs/attr.c
59172+++ b/fs/attr.c
59173@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59174 unsigned long limit;
59175
59176 limit = rlimit(RLIMIT_FSIZE);
59177+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59178 if (limit != RLIM_INFINITY && offset > limit)
59179 goto out_sig;
59180 if (offset > inode->i_sb->s_maxbytes)
59181diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59182index 116fd38..c04182da 100644
59183--- a/fs/autofs4/waitq.c
59184+++ b/fs/autofs4/waitq.c
59185@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59186 {
59187 unsigned long sigpipe, flags;
59188 mm_segment_t fs;
59189- const char *data = (const char *)addr;
59190+ const char __user *data = (const char __force_user *)addr;
59191 ssize_t wr = 0;
59192
59193 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59194@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59195 return 1;
59196 }
59197
59198+#ifdef CONFIG_GRKERNSEC_HIDESYM
59199+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59200+#endif
59201+
59202 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59203 enum autofs_notify notify)
59204 {
59205@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59206
59207 /* If this is a direct mount request create a dummy name */
59208 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59209+#ifdef CONFIG_GRKERNSEC_HIDESYM
59210+ /* this name does get written to userland via autofs4_write() */
59211+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59212+#else
59213 qstr.len = sprintf(name, "%p", dentry);
59214+#endif
59215 else {
59216 qstr.len = autofs4_getpath(sbi, dentry, &name);
59217 if (!qstr.len) {
59218diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59219index 2722387..56059b5 100644
59220--- a/fs/befs/endian.h
59221+++ b/fs/befs/endian.h
59222@@ -11,7 +11,7 @@
59223
59224 #include <asm/byteorder.h>
59225
59226-static inline u64
59227+static inline u64 __intentional_overflow(-1)
59228 fs64_to_cpu(const struct super_block *sb, fs64 n)
59229 {
59230 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59231@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59232 return (__force fs64)cpu_to_be64(n);
59233 }
59234
59235-static inline u32
59236+static inline u32 __intentional_overflow(-1)
59237 fs32_to_cpu(const struct super_block *sb, fs32 n)
59238 {
59239 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59240@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59241 return (__force fs32)cpu_to_be32(n);
59242 }
59243
59244-static inline u16
59245+static inline u16 __intentional_overflow(-1)
59246 fs16_to_cpu(const struct super_block *sb, fs16 n)
59247 {
59248 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59249diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59250index ca0ba15..0fa3257 100644
59251--- a/fs/binfmt_aout.c
59252+++ b/fs/binfmt_aout.c
59253@@ -16,6 +16,7 @@
59254 #include <linux/string.h>
59255 #include <linux/fs.h>
59256 #include <linux/file.h>
59257+#include <linux/security.h>
59258 #include <linux/stat.h>
59259 #include <linux/fcntl.h>
59260 #include <linux/ptrace.h>
59261@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59262 #endif
59263 # define START_STACK(u) ((void __user *)u.start_stack)
59264
59265+ memset(&dump, 0, sizeof(dump));
59266+
59267 fs = get_fs();
59268 set_fs(KERNEL_DS);
59269 has_dumped = 1;
59270@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59271
59272 /* If the size of the dump file exceeds the rlimit, then see what would happen
59273 if we wrote the stack, but not the data area. */
59274+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59275 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59276 dump.u_dsize = 0;
59277
59278 /* Make sure we have enough room to write the stack and data areas. */
59279+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59280 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59281 dump.u_ssize = 0;
59282
59283@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59284 rlim = rlimit(RLIMIT_DATA);
59285 if (rlim >= RLIM_INFINITY)
59286 rlim = ~0;
59287+
59288+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59289 if (ex.a_data + ex.a_bss > rlim)
59290 return -ENOMEM;
59291
59292@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59293
59294 install_exec_creds(bprm);
59295
59296+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59297+ current->mm->pax_flags = 0UL;
59298+#endif
59299+
59300+#ifdef CONFIG_PAX_PAGEEXEC
59301+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59302+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59303+
59304+#ifdef CONFIG_PAX_EMUTRAMP
59305+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59306+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59307+#endif
59308+
59309+#ifdef CONFIG_PAX_MPROTECT
59310+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59311+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59312+#endif
59313+
59314+ }
59315+#endif
59316+
59317 if (N_MAGIC(ex) == OMAGIC) {
59318 unsigned long text_addr, map_size;
59319 loff_t pos;
59320@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59321 }
59322
59323 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59324- PROT_READ | PROT_WRITE | PROT_EXEC,
59325+ PROT_READ | PROT_WRITE,
59326 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59327 fd_offset + ex.a_text);
59328 if (error != N_DATADDR(ex)) {
59329diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59330index 3892c1a..4e27c04 100644
59331--- a/fs/binfmt_elf.c
59332+++ b/fs/binfmt_elf.c
59333@@ -34,6 +34,7 @@
59334 #include <linux/utsname.h>
59335 #include <linux/coredump.h>
59336 #include <linux/sched.h>
59337+#include <linux/xattr.h>
59338 #include <asm/uaccess.h>
59339 #include <asm/param.h>
59340 #include <asm/page.h>
59341@@ -47,7 +48,7 @@
59342
59343 static int load_elf_binary(struct linux_binprm *bprm);
59344 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59345- int, int, unsigned long);
59346+ int, int, unsigned long) __intentional_overflow(-1);
59347
59348 #ifdef CONFIG_USELIB
59349 static int load_elf_library(struct file *);
59350@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59351 #define elf_core_dump NULL
59352 #endif
59353
59354+#ifdef CONFIG_PAX_MPROTECT
59355+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59356+#endif
59357+
59358+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59359+static void elf_handle_mmap(struct file *file);
59360+#endif
59361+
59362 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59363 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59364 #else
59365@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59366 .load_binary = load_elf_binary,
59367 .load_shlib = load_elf_library,
59368 .core_dump = elf_core_dump,
59369+
59370+#ifdef CONFIG_PAX_MPROTECT
59371+ .handle_mprotect= elf_handle_mprotect,
59372+#endif
59373+
59374+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59375+ .handle_mmap = elf_handle_mmap,
59376+#endif
59377+
59378 .min_coredump = ELF_EXEC_PAGESIZE,
59379 };
59380
59381@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59382
59383 static int set_brk(unsigned long start, unsigned long end)
59384 {
59385+ unsigned long e = end;
59386+
59387 start = ELF_PAGEALIGN(start);
59388 end = ELF_PAGEALIGN(end);
59389 if (end > start) {
59390@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59391 if (BAD_ADDR(addr))
59392 return addr;
59393 }
59394- current->mm->start_brk = current->mm->brk = end;
59395+ current->mm->start_brk = current->mm->brk = e;
59396 return 0;
59397 }
59398
59399@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59400 elf_addr_t __user *u_rand_bytes;
59401 const char *k_platform = ELF_PLATFORM;
59402 const char *k_base_platform = ELF_BASE_PLATFORM;
59403- unsigned char k_rand_bytes[16];
59404+ u32 k_rand_bytes[4];
59405 int items;
59406 elf_addr_t *elf_info;
59407 int ei_index = 0;
59408 const struct cred *cred = current_cred();
59409 struct vm_area_struct *vma;
59410+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59411
59412 /*
59413 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59414@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59415 * Generate 16 random bytes for userspace PRNG seeding.
59416 */
59417 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59418- u_rand_bytes = (elf_addr_t __user *)
59419- STACK_ALLOC(p, sizeof(k_rand_bytes));
59420+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59421+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59422+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59423+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59424+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59425+ u_rand_bytes = (elf_addr_t __user *) p;
59426 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59427 return -EFAULT;
59428
59429@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59430 return -EFAULT;
59431 current->mm->env_end = p;
59432
59433+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
59434+
59435 /* Put the elf_info on the stack in the right place. */
59436 sp = (elf_addr_t __user *)envp + 1;
59437- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
59438+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
59439 return -EFAULT;
59440 return 0;
59441 }
59442@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
59443 an ELF header */
59444
59445 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59446- struct file *interpreter, unsigned long *interp_map_addr,
59447- unsigned long no_base)
59448+ struct file *interpreter, unsigned long no_base)
59449 {
59450 struct elf_phdr *elf_phdata;
59451 struct elf_phdr *eppnt;
59452- unsigned long load_addr = 0;
59453+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
59454 int load_addr_set = 0;
59455 unsigned long last_bss = 0, elf_bss = 0;
59456- unsigned long error = ~0UL;
59457+ unsigned long error = -EINVAL;
59458 unsigned long total_size;
59459 int retval, i, size;
59460
59461@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59462 goto out_close;
59463 }
59464
59465+#ifdef CONFIG_PAX_SEGMEXEC
59466+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
59467+ pax_task_size = SEGMEXEC_TASK_SIZE;
59468+#endif
59469+
59470 eppnt = elf_phdata;
59471 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
59472 if (eppnt->p_type == PT_LOAD) {
59473@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59474 map_addr = elf_map(interpreter, load_addr + vaddr,
59475 eppnt, elf_prot, elf_type, total_size);
59476 total_size = 0;
59477- if (!*interp_map_addr)
59478- *interp_map_addr = map_addr;
59479 error = map_addr;
59480 if (BAD_ADDR(map_addr))
59481 goto out_close;
59482@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59483 k = load_addr + eppnt->p_vaddr;
59484 if (BAD_ADDR(k) ||
59485 eppnt->p_filesz > eppnt->p_memsz ||
59486- eppnt->p_memsz > TASK_SIZE ||
59487- TASK_SIZE - eppnt->p_memsz < k) {
59488+ eppnt->p_memsz > pax_task_size ||
59489+ pax_task_size - eppnt->p_memsz < k) {
59490 error = -ENOMEM;
59491 goto out_close;
59492 }
59493@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59494 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
59495
59496 /* Map the last of the bss segment */
59497- error = vm_brk(elf_bss, last_bss - elf_bss);
59498- if (BAD_ADDR(error))
59499- goto out_close;
59500+ if (last_bss > elf_bss) {
59501+ error = vm_brk(elf_bss, last_bss - elf_bss);
59502+ if (BAD_ADDR(error))
59503+ goto out_close;
59504+ }
59505 }
59506
59507 error = load_addr;
59508@@ -543,6 +574,336 @@ out:
59509 return error;
59510 }
59511
59512+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59513+#ifdef CONFIG_PAX_SOFTMODE
59514+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
59515+{
59516+ unsigned long pax_flags = 0UL;
59517+
59518+#ifdef CONFIG_PAX_PAGEEXEC
59519+ if (elf_phdata->p_flags & PF_PAGEEXEC)
59520+ pax_flags |= MF_PAX_PAGEEXEC;
59521+#endif
59522+
59523+#ifdef CONFIG_PAX_SEGMEXEC
59524+ if (elf_phdata->p_flags & PF_SEGMEXEC)
59525+ pax_flags |= MF_PAX_SEGMEXEC;
59526+#endif
59527+
59528+#ifdef CONFIG_PAX_EMUTRAMP
59529+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59530+ pax_flags |= MF_PAX_EMUTRAMP;
59531+#endif
59532+
59533+#ifdef CONFIG_PAX_MPROTECT
59534+ if (elf_phdata->p_flags & PF_MPROTECT)
59535+ pax_flags |= MF_PAX_MPROTECT;
59536+#endif
59537+
59538+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59539+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
59540+ pax_flags |= MF_PAX_RANDMMAP;
59541+#endif
59542+
59543+ return pax_flags;
59544+}
59545+#endif
59546+
59547+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
59548+{
59549+ unsigned long pax_flags = 0UL;
59550+
59551+#ifdef CONFIG_PAX_PAGEEXEC
59552+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
59553+ pax_flags |= MF_PAX_PAGEEXEC;
59554+#endif
59555+
59556+#ifdef CONFIG_PAX_SEGMEXEC
59557+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
59558+ pax_flags |= MF_PAX_SEGMEXEC;
59559+#endif
59560+
59561+#ifdef CONFIG_PAX_EMUTRAMP
59562+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
59563+ pax_flags |= MF_PAX_EMUTRAMP;
59564+#endif
59565+
59566+#ifdef CONFIG_PAX_MPROTECT
59567+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
59568+ pax_flags |= MF_PAX_MPROTECT;
59569+#endif
59570+
59571+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59572+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
59573+ pax_flags |= MF_PAX_RANDMMAP;
59574+#endif
59575+
59576+ return pax_flags;
59577+}
59578+#endif
59579+
59580+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59581+#ifdef CONFIG_PAX_SOFTMODE
59582+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
59583+{
59584+ unsigned long pax_flags = 0UL;
59585+
59586+#ifdef CONFIG_PAX_PAGEEXEC
59587+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
59588+ pax_flags |= MF_PAX_PAGEEXEC;
59589+#endif
59590+
59591+#ifdef CONFIG_PAX_SEGMEXEC
59592+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
59593+ pax_flags |= MF_PAX_SEGMEXEC;
59594+#endif
59595+
59596+#ifdef CONFIG_PAX_EMUTRAMP
59597+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59598+ pax_flags |= MF_PAX_EMUTRAMP;
59599+#endif
59600+
59601+#ifdef CONFIG_PAX_MPROTECT
59602+ if (pax_flags_softmode & MF_PAX_MPROTECT)
59603+ pax_flags |= MF_PAX_MPROTECT;
59604+#endif
59605+
59606+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59607+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
59608+ pax_flags |= MF_PAX_RANDMMAP;
59609+#endif
59610+
59611+ return pax_flags;
59612+}
59613+#endif
59614+
59615+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
59616+{
59617+ unsigned long pax_flags = 0UL;
59618+
59619+#ifdef CONFIG_PAX_PAGEEXEC
59620+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
59621+ pax_flags |= MF_PAX_PAGEEXEC;
59622+#endif
59623+
59624+#ifdef CONFIG_PAX_SEGMEXEC
59625+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
59626+ pax_flags |= MF_PAX_SEGMEXEC;
59627+#endif
59628+
59629+#ifdef CONFIG_PAX_EMUTRAMP
59630+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
59631+ pax_flags |= MF_PAX_EMUTRAMP;
59632+#endif
59633+
59634+#ifdef CONFIG_PAX_MPROTECT
59635+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
59636+ pax_flags |= MF_PAX_MPROTECT;
59637+#endif
59638+
59639+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59640+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
59641+ pax_flags |= MF_PAX_RANDMMAP;
59642+#endif
59643+
59644+ return pax_flags;
59645+}
59646+#endif
59647+
59648+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59649+static unsigned long pax_parse_defaults(void)
59650+{
59651+ unsigned long pax_flags = 0UL;
59652+
59653+#ifdef CONFIG_PAX_SOFTMODE
59654+ if (pax_softmode)
59655+ return pax_flags;
59656+#endif
59657+
59658+#ifdef CONFIG_PAX_PAGEEXEC
59659+ pax_flags |= MF_PAX_PAGEEXEC;
59660+#endif
59661+
59662+#ifdef CONFIG_PAX_SEGMEXEC
59663+ pax_flags |= MF_PAX_SEGMEXEC;
59664+#endif
59665+
59666+#ifdef CONFIG_PAX_MPROTECT
59667+ pax_flags |= MF_PAX_MPROTECT;
59668+#endif
59669+
59670+#ifdef CONFIG_PAX_RANDMMAP
59671+ if (randomize_va_space)
59672+ pax_flags |= MF_PAX_RANDMMAP;
59673+#endif
59674+
59675+ return pax_flags;
59676+}
59677+
59678+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
59679+{
59680+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
59681+
59682+#ifdef CONFIG_PAX_EI_PAX
59683+
59684+#ifdef CONFIG_PAX_SOFTMODE
59685+ if (pax_softmode)
59686+ return pax_flags;
59687+#endif
59688+
59689+ pax_flags = 0UL;
59690+
59691+#ifdef CONFIG_PAX_PAGEEXEC
59692+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
59693+ pax_flags |= MF_PAX_PAGEEXEC;
59694+#endif
59695+
59696+#ifdef CONFIG_PAX_SEGMEXEC
59697+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
59698+ pax_flags |= MF_PAX_SEGMEXEC;
59699+#endif
59700+
59701+#ifdef CONFIG_PAX_EMUTRAMP
59702+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
59703+ pax_flags |= MF_PAX_EMUTRAMP;
59704+#endif
59705+
59706+#ifdef CONFIG_PAX_MPROTECT
59707+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
59708+ pax_flags |= MF_PAX_MPROTECT;
59709+#endif
59710+
59711+#ifdef CONFIG_PAX_ASLR
59712+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
59713+ pax_flags |= MF_PAX_RANDMMAP;
59714+#endif
59715+
59716+#endif
59717+
59718+ return pax_flags;
59719+
59720+}
59721+
59722+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59723+{
59724+
59725+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59726+ unsigned long i;
59727+
59728+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59729+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59730+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59731+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59732+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59733+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59734+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59735+ return PAX_PARSE_FLAGS_FALLBACK;
59736+
59737+#ifdef CONFIG_PAX_SOFTMODE
59738+ if (pax_softmode)
59739+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59740+ else
59741+#endif
59742+
59743+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59744+ break;
59745+ }
59746+#endif
59747+
59748+ return PAX_PARSE_FLAGS_FALLBACK;
59749+}
59750+
59751+static unsigned long pax_parse_xattr_pax(struct file * const file)
59752+{
59753+
59754+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59755+ ssize_t xattr_size, i;
59756+ unsigned char xattr_value[sizeof("pemrs") - 1];
59757+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59758+
59759+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59760+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59761+ return PAX_PARSE_FLAGS_FALLBACK;
59762+
59763+ for (i = 0; i < xattr_size; i++)
59764+ switch (xattr_value[i]) {
59765+ default:
59766+ return PAX_PARSE_FLAGS_FALLBACK;
59767+
59768+#define parse_flag(option1, option2, flag) \
59769+ case option1: \
59770+ if (pax_flags_hardmode & MF_PAX_##flag) \
59771+ return PAX_PARSE_FLAGS_FALLBACK;\
59772+ pax_flags_hardmode |= MF_PAX_##flag; \
59773+ break; \
59774+ case option2: \
59775+ if (pax_flags_softmode & MF_PAX_##flag) \
59776+ return PAX_PARSE_FLAGS_FALLBACK;\
59777+ pax_flags_softmode |= MF_PAX_##flag; \
59778+ break;
59779+
59780+ parse_flag('p', 'P', PAGEEXEC);
59781+ parse_flag('e', 'E', EMUTRAMP);
59782+ parse_flag('m', 'M', MPROTECT);
59783+ parse_flag('r', 'R', RANDMMAP);
59784+ parse_flag('s', 'S', SEGMEXEC);
59785+
59786+#undef parse_flag
59787+ }
59788+
59789+ if (pax_flags_hardmode & pax_flags_softmode)
59790+ return PAX_PARSE_FLAGS_FALLBACK;
59791+
59792+#ifdef CONFIG_PAX_SOFTMODE
59793+ if (pax_softmode)
59794+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59795+ else
59796+#endif
59797+
59798+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59799+#else
59800+ return PAX_PARSE_FLAGS_FALLBACK;
59801+#endif
59802+
59803+}
59804+
59805+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59806+{
59807+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59808+
59809+ pax_flags = pax_parse_defaults();
59810+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59811+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59812+ xattr_pax_flags = pax_parse_xattr_pax(file);
59813+
59814+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59815+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59816+ pt_pax_flags != xattr_pax_flags)
59817+ return -EINVAL;
59818+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59819+ pax_flags = xattr_pax_flags;
59820+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59821+ pax_flags = pt_pax_flags;
59822+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59823+ pax_flags = ei_pax_flags;
59824+
59825+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59826+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59827+ if ((__supported_pte_mask & _PAGE_NX))
59828+ pax_flags &= ~MF_PAX_SEGMEXEC;
59829+ else
59830+ pax_flags &= ~MF_PAX_PAGEEXEC;
59831+ }
59832+#endif
59833+
59834+ if (0 > pax_check_flags(&pax_flags))
59835+ return -EINVAL;
59836+
59837+ current->mm->pax_flags = pax_flags;
59838+ return 0;
59839+}
59840+#endif
59841+
59842 /*
59843 * These are the functions used to load ELF style executables and shared
59844 * libraries. There is no binary dependent code anywhere else.
59845@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59846 {
59847 unsigned int random_variable = 0;
59848
59849+#ifdef CONFIG_PAX_RANDUSTACK
59850+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59851+ return stack_top - current->mm->delta_stack;
59852+#endif
59853+
59854 if ((current->flags & PF_RANDOMIZE) &&
59855 !(current->personality & ADDR_NO_RANDOMIZE)) {
59856 random_variable = get_random_int() & STACK_RND_MASK;
59857@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59858 unsigned long load_addr = 0, load_bias = 0;
59859 int load_addr_set = 0;
59860 char * elf_interpreter = NULL;
59861- unsigned long error;
59862+ unsigned long error = 0;
59863 struct elf_phdr *elf_ppnt, *elf_phdata;
59864 unsigned long elf_bss, elf_brk;
59865 int retval, i;
59866@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59867 struct elfhdr elf_ex;
59868 struct elfhdr interp_elf_ex;
59869 } *loc;
59870+ unsigned long pax_task_size;
59871
59872 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59873 if (!loc) {
59874@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59875 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59876 may depend on the personality. */
59877 SET_PERSONALITY(loc->elf_ex);
59878+
59879+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59880+ current->mm->pax_flags = 0UL;
59881+#endif
59882+
59883+#ifdef CONFIG_PAX_DLRESOLVE
59884+ current->mm->call_dl_resolve = 0UL;
59885+#endif
59886+
59887+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59888+ current->mm->call_syscall = 0UL;
59889+#endif
59890+
59891+#ifdef CONFIG_PAX_ASLR
59892+ current->mm->delta_mmap = 0UL;
59893+ current->mm->delta_stack = 0UL;
59894+#endif
59895+
59896+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59897+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59898+ send_sig(SIGKILL, current, 0);
59899+ goto out_free_dentry;
59900+ }
59901+#endif
59902+
59903+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59904+ pax_set_initial_flags(bprm);
59905+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59906+ if (pax_set_initial_flags_func)
59907+ (pax_set_initial_flags_func)(bprm);
59908+#endif
59909+
59910+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59911+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59912+ current->mm->context.user_cs_limit = PAGE_SIZE;
59913+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59914+ }
59915+#endif
59916+
59917+#ifdef CONFIG_PAX_SEGMEXEC
59918+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59919+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59920+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59921+ pax_task_size = SEGMEXEC_TASK_SIZE;
59922+ current->mm->def_flags |= VM_NOHUGEPAGE;
59923+ } else
59924+#endif
59925+
59926+ pax_task_size = TASK_SIZE;
59927+
59928+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59929+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59930+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59931+ put_cpu();
59932+ }
59933+#endif
59934+
59935+#ifdef CONFIG_PAX_ASLR
59936+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59937+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59938+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59939+ }
59940+#endif
59941+
59942+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59943+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59944+ executable_stack = EXSTACK_DISABLE_X;
59945+ current->personality &= ~READ_IMPLIES_EXEC;
59946+ } else
59947+#endif
59948+
59949 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59950 current->personality |= READ_IMPLIES_EXEC;
59951
59952@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59953 #else
59954 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59955 #endif
59956+
59957+#ifdef CONFIG_PAX_RANDMMAP
59958+ /* PaX: randomize base address at the default exe base if requested */
59959+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59960+#ifdef CONFIG_SPARC64
59961+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59962+#else
59963+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59964+#endif
59965+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59966+ elf_flags |= MAP_FIXED;
59967+ }
59968+#endif
59969+
59970 }
59971
59972 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59973@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59974 * allowed task size. Note that p_filesz must always be
59975 * <= p_memsz so it is only necessary to check p_memsz.
59976 */
59977- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59978- elf_ppnt->p_memsz > TASK_SIZE ||
59979- TASK_SIZE - elf_ppnt->p_memsz < k) {
59980+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59981+ elf_ppnt->p_memsz > pax_task_size ||
59982+ pax_task_size - elf_ppnt->p_memsz < k) {
59983 /* set_brk can never work. Avoid overflows. */
59984 send_sig(SIGKILL, current, 0);
59985 retval = -EINVAL;
59986@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
59987 goto out_free_dentry;
59988 }
59989 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59990- send_sig(SIGSEGV, current, 0);
59991- retval = -EFAULT; /* Nobody gets to see this, but.. */
59992- goto out_free_dentry;
59993+ /*
59994+ * This bss-zeroing can fail if the ELF
59995+ * file specifies odd protections. So
59996+ * we don't check the return value
59997+ */
59998 }
59999
60000+#ifdef CONFIG_PAX_RANDMMAP
60001+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60002+ unsigned long start, size, flags;
60003+ vm_flags_t vm_flags;
60004+
60005+ start = ELF_PAGEALIGN(elf_brk);
60006+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60007+ flags = MAP_FIXED | MAP_PRIVATE;
60008+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60009+
60010+ down_write(&current->mm->mmap_sem);
60011+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60012+ retval = -ENOMEM;
60013+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60014+// if (current->personality & ADDR_NO_RANDOMIZE)
60015+// vm_flags |= VM_READ | VM_MAYREAD;
60016+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60017+ retval = IS_ERR_VALUE(start) ? start : 0;
60018+ }
60019+ up_write(&current->mm->mmap_sem);
60020+ if (retval == 0)
60021+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60022+ if (retval < 0) {
60023+ send_sig(SIGKILL, current, 0);
60024+ goto out_free_dentry;
60025+ }
60026+ }
60027+#endif
60028+
60029 if (elf_interpreter) {
60030- unsigned long interp_map_addr = 0;
60031-
60032 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60033 interpreter,
60034- &interp_map_addr,
60035 load_bias);
60036 if (!IS_ERR((void *)elf_entry)) {
60037 /*
60038@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60039 * Decide what to dump of a segment, part, all or none.
60040 */
60041 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60042- unsigned long mm_flags)
60043+ unsigned long mm_flags, long signr)
60044 {
60045 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60046
60047@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60048 if (vma->vm_file == NULL)
60049 return 0;
60050
60051- if (FILTER(MAPPED_PRIVATE))
60052+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60053 goto whole;
60054
60055 /*
60056@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60057 {
60058 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60059 int i = 0;
60060- do
60061+ do {
60062 i += 2;
60063- while (auxv[i - 2] != AT_NULL);
60064+ } while (auxv[i - 2] != AT_NULL);
60065 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60066 }
60067
60068@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60069 {
60070 mm_segment_t old_fs = get_fs();
60071 set_fs(KERNEL_DS);
60072- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60073+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60074 set_fs(old_fs);
60075 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60076 }
60077@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60078 }
60079
60080 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60081- unsigned long mm_flags)
60082+ struct coredump_params *cprm)
60083 {
60084 struct vm_area_struct *vma;
60085 size_t size = 0;
60086
60087 for (vma = first_vma(current, gate_vma); vma != NULL;
60088 vma = next_vma(vma, gate_vma))
60089- size += vma_dump_size(vma, mm_flags);
60090+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60091 return size;
60092 }
60093
60094@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60095
60096 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60097
60098- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60099+ offset += elf_core_vma_data_size(gate_vma, cprm);
60100 offset += elf_core_extra_data_size();
60101 e_shoff = offset;
60102
60103@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60104 phdr.p_offset = offset;
60105 phdr.p_vaddr = vma->vm_start;
60106 phdr.p_paddr = 0;
60107- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60108+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60109 phdr.p_memsz = vma->vm_end - vma->vm_start;
60110 offset += phdr.p_filesz;
60111 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60112@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60113 unsigned long addr;
60114 unsigned long end;
60115
60116- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60117+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60118
60119 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60120 struct page *page;
60121@@ -2210,6 +2690,167 @@ out:
60122
60123 #endif /* CONFIG_ELF_CORE */
60124
60125+#ifdef CONFIG_PAX_MPROTECT
60126+/* PaX: non-PIC ELF libraries need relocations on their executable segments
60127+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
60128+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60129+ *
60130+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
60131+ * basis because we want to allow the common case and not the special ones.
60132+ */
60133+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60134+{
60135+ struct elfhdr elf_h;
60136+ struct elf_phdr elf_p;
60137+ unsigned long i;
60138+ unsigned long oldflags;
60139+ bool is_textrel_rw, is_textrel_rx, is_relro;
60140+
60141+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60142+ return;
60143+
60144+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60145+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60146+
60147+#ifdef CONFIG_PAX_ELFRELOCS
60148+ /* possible TEXTREL */
60149+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60150+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60151+#else
60152+ is_textrel_rw = false;
60153+ is_textrel_rx = false;
60154+#endif
60155+
60156+ /* possible RELRO */
60157+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60158+
60159+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60160+ return;
60161+
60162+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60163+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60164+
60165+#ifdef CONFIG_PAX_ETEXECRELOCS
60166+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60167+#else
60168+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60169+#endif
60170+
60171+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60172+ !elf_check_arch(&elf_h) ||
60173+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60174+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60175+ return;
60176+
60177+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60178+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60179+ return;
60180+ switch (elf_p.p_type) {
60181+ case PT_DYNAMIC:
60182+ if (!is_textrel_rw && !is_textrel_rx)
60183+ continue;
60184+ i = 0UL;
60185+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60186+ elf_dyn dyn;
60187+
60188+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60189+ break;
60190+ if (dyn.d_tag == DT_NULL)
60191+ break;
60192+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60193+ gr_log_textrel(vma);
60194+ if (is_textrel_rw)
60195+ vma->vm_flags |= VM_MAYWRITE;
60196+ else
60197+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
60198+ vma->vm_flags &= ~VM_MAYWRITE;
60199+ break;
60200+ }
60201+ i++;
60202+ }
60203+ is_textrel_rw = false;
60204+ is_textrel_rx = false;
60205+ continue;
60206+
60207+ case PT_GNU_RELRO:
60208+ if (!is_relro)
60209+ continue;
60210+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60211+ vma->vm_flags &= ~VM_MAYWRITE;
60212+ is_relro = false;
60213+ continue;
60214+
60215+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60216+ case PT_PAX_FLAGS: {
60217+ const char *msg_mprotect = "", *msg_emutramp = "";
60218+ char *buffer_lib, *buffer_exe;
60219+
60220+ if (elf_p.p_flags & PF_NOMPROTECT)
60221+ msg_mprotect = "MPROTECT disabled";
60222+
60223+#ifdef CONFIG_PAX_EMUTRAMP
60224+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60225+ msg_emutramp = "EMUTRAMP enabled";
60226+#endif
60227+
60228+ if (!msg_mprotect[0] && !msg_emutramp[0])
60229+ continue;
60230+
60231+ if (!printk_ratelimit())
60232+ continue;
60233+
60234+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60235+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60236+ if (buffer_lib && buffer_exe) {
60237+ char *path_lib, *path_exe;
60238+
60239+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60240+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60241+
60242+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60243+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60244+
60245+ }
60246+ free_page((unsigned long)buffer_exe);
60247+ free_page((unsigned long)buffer_lib);
60248+ continue;
60249+ }
60250+#endif
60251+
60252+ }
60253+ }
60254+}
60255+#endif
60256+
60257+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60258+
60259+extern int grsec_enable_log_rwxmaps;
60260+
60261+static void elf_handle_mmap(struct file *file)
60262+{
60263+ struct elfhdr elf_h;
60264+ struct elf_phdr elf_p;
60265+ unsigned long i;
60266+
60267+ if (!grsec_enable_log_rwxmaps)
60268+ return;
60269+
60270+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60271+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60272+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60273+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60274+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60275+ return;
60276+
60277+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60278+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60279+ return;
60280+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60281+ gr_log_ptgnustack(file);
60282+ }
60283+}
60284+#endif
60285+
60286 static int __init init_elf_binfmt(void)
60287 {
60288 register_binfmt(&elf_format);
60289diff --git a/fs/block_dev.c b/fs/block_dev.c
60290index 6d72746..536d1db 100644
60291--- a/fs/block_dev.c
60292+++ b/fs/block_dev.c
60293@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60294 else if (bdev->bd_contains == bdev)
60295 return true; /* is a whole device which isn't held */
60296
60297- else if (whole->bd_holder == bd_may_claim)
60298+ else if (whole->bd_holder == (void *)bd_may_claim)
60299 return true; /* is a partition of a device that is being partitioned */
60300 else if (whole->bd_holder != NULL)
60301 return false; /* is a partition of a held device */
60302diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60303index 44ee5d2..8b23e53 100644
60304--- a/fs/btrfs/ctree.c
60305+++ b/fs/btrfs/ctree.c
60306@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60307 free_extent_buffer(buf);
60308 add_root_to_dirty_list(root);
60309 } else {
60310- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60311- parent_start = parent->start;
60312- else
60313+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60314+ if (parent)
60315+ parent_start = parent->start;
60316+ else
60317+ parent_start = 0;
60318+ } else
60319 parent_start = 0;
60320
60321 WARN_ON(trans->transid != btrfs_header_generation(parent));
60322diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60323index a2e90f8..5135e5f 100644
60324--- a/fs/btrfs/delayed-inode.c
60325+++ b/fs/btrfs/delayed-inode.c
60326@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60327
60328 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60329 {
60330- int seq = atomic_inc_return(&delayed_root->items_seq);
60331+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60332 if ((atomic_dec_return(&delayed_root->items) <
60333 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60334 waitqueue_active(&delayed_root->wait))
60335@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60336
60337 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60338 {
60339- int val = atomic_read(&delayed_root->items_seq);
60340+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60341
60342 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60343 return 1;
60344@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60345 int seq;
60346 int ret;
60347
60348- seq = atomic_read(&delayed_root->items_seq);
60349+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60350
60351 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60352 if (ret)
60353diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60354index f70119f..ab5894d 100644
60355--- a/fs/btrfs/delayed-inode.h
60356+++ b/fs/btrfs/delayed-inode.h
60357@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60358 */
60359 struct list_head prepare_list;
60360 atomic_t items; /* for delayed items */
60361- atomic_t items_seq; /* for delayed items */
60362+ atomic_unchecked_t items_seq; /* for delayed items */
60363 int nodes; /* for delayed nodes */
60364 wait_queue_head_t wait;
60365 };
60366@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60367 struct btrfs_delayed_root *delayed_root)
60368 {
60369 atomic_set(&delayed_root->items, 0);
60370- atomic_set(&delayed_root->items_seq, 0);
60371+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60372 delayed_root->nodes = 0;
60373 spin_lock_init(&delayed_root->lock);
60374 init_waitqueue_head(&delayed_root->wait);
60375diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60376index b765d41..5a8b0c3 100644
60377--- a/fs/btrfs/ioctl.c
60378+++ b/fs/btrfs/ioctl.c
60379@@ -3975,9 +3975,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60380 for (i = 0; i < num_types; i++) {
60381 struct btrfs_space_info *tmp;
60382
60383+ /* Don't copy in more than we allocated */
60384 if (!slot_count)
60385 break;
60386
60387+ slot_count--;
60388+
60389 info = NULL;
60390 rcu_read_lock();
60391 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
60392@@ -3999,10 +4002,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60393 memcpy(dest, &space, sizeof(space));
60394 dest++;
60395 space_args.total_spaces++;
60396- slot_count--;
60397 }
60398- if (!slot_count)
60399- break;
60400 }
60401 up_read(&info->groups_sem);
60402 }
60403diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60404index c4124de..d7613eb6 100644
60405--- a/fs/btrfs/super.c
60406+++ b/fs/btrfs/super.c
60407@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60408 function, line, errstr);
60409 return;
60410 }
60411- ACCESS_ONCE(trans->transaction->aborted) = errno;
60412+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60413 /* Wake up anybody who may be waiting on this transaction */
60414 wake_up(&root->fs_info->transaction_wait);
60415 wake_up(&root->fs_info->transaction_blocked_wait);
60416diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60417index 12e5355..cdf30c6 100644
60418--- a/fs/btrfs/sysfs.c
60419+++ b/fs/btrfs/sysfs.c
60420@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60421 for (set = 0; set < FEAT_MAX; set++) {
60422 int i;
60423 struct attribute *attrs[2];
60424- struct attribute_group agroup = {
60425+ attribute_group_no_const agroup = {
60426 .name = "features",
60427 .attrs = attrs,
60428 };
60429diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
60430index e2e798a..f454c18 100644
60431--- a/fs/btrfs/tree-log.h
60432+++ b/fs/btrfs/tree-log.h
60433@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
60434 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
60435 struct btrfs_trans_handle *trans)
60436 {
60437- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
60438+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
60439 }
60440
60441 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
60442diff --git a/fs/buffer.c b/fs/buffer.c
60443index 3588a80..3d038a9 100644
60444--- a/fs/buffer.c
60445+++ b/fs/buffer.c
60446@@ -2318,6 +2318,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
60447 err = 0;
60448
60449 balance_dirty_pages_ratelimited(mapping);
60450+
60451+ if (unlikely(fatal_signal_pending(current))) {
60452+ err = -EINTR;
60453+ goto out;
60454+ }
60455 }
60456
60457 /* page covers the boundary, find the boundary offset */
60458@@ -3424,7 +3429,7 @@ void __init buffer_init(void)
60459 bh_cachep = kmem_cache_create("buffer_head",
60460 sizeof(struct buffer_head), 0,
60461 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
60462- SLAB_MEM_SPREAD),
60463+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
60464 NULL);
60465
60466 /*
60467diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
60468index fbb08e9..0fda764 100644
60469--- a/fs/cachefiles/bind.c
60470+++ b/fs/cachefiles/bind.c
60471@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
60472 args);
60473
60474 /* start by checking things over */
60475- ASSERT(cache->fstop_percent >= 0 &&
60476- cache->fstop_percent < cache->fcull_percent &&
60477+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
60478 cache->fcull_percent < cache->frun_percent &&
60479 cache->frun_percent < 100);
60480
60481- ASSERT(cache->bstop_percent >= 0 &&
60482- cache->bstop_percent < cache->bcull_percent &&
60483+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
60484 cache->bcull_percent < cache->brun_percent &&
60485 cache->brun_percent < 100);
60486
60487diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
60488index ce1b115..4a6852c 100644
60489--- a/fs/cachefiles/daemon.c
60490+++ b/fs/cachefiles/daemon.c
60491@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
60492 if (n > buflen)
60493 return -EMSGSIZE;
60494
60495- if (copy_to_user(_buffer, buffer, n) != 0)
60496+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
60497 return -EFAULT;
60498
60499 return n;
60500@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
60501 if (test_bit(CACHEFILES_DEAD, &cache->flags))
60502 return -EIO;
60503
60504- if (datalen < 0 || datalen > PAGE_SIZE - 1)
60505+ if (datalen > PAGE_SIZE - 1)
60506 return -EOPNOTSUPP;
60507
60508 /* drag the command string into the kernel so we can parse it */
60509@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
60510 if (args[0] != '%' || args[1] != '\0')
60511 return -EINVAL;
60512
60513- if (fstop < 0 || fstop >= cache->fcull_percent)
60514+ if (fstop >= cache->fcull_percent)
60515 return cachefiles_daemon_range_error(cache, args);
60516
60517 cache->fstop_percent = fstop;
60518@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
60519 if (args[0] != '%' || args[1] != '\0')
60520 return -EINVAL;
60521
60522- if (bstop < 0 || bstop >= cache->bcull_percent)
60523+ if (bstop >= cache->bcull_percent)
60524 return cachefiles_daemon_range_error(cache, args);
60525
60526 cache->bstop_percent = bstop;
60527diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
60528index 8c52472..c4e3a69 100644
60529--- a/fs/cachefiles/internal.h
60530+++ b/fs/cachefiles/internal.h
60531@@ -66,7 +66,7 @@ struct cachefiles_cache {
60532 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
60533 struct rb_root active_nodes; /* active nodes (can't be culled) */
60534 rwlock_t active_lock; /* lock for active_nodes */
60535- atomic_t gravecounter; /* graveyard uniquifier */
60536+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
60537 unsigned frun_percent; /* when to stop culling (% files) */
60538 unsigned fcull_percent; /* when to start culling (% files) */
60539 unsigned fstop_percent; /* when to stop allocating (% files) */
60540@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
60541 * proc.c
60542 */
60543 #ifdef CONFIG_CACHEFILES_HISTOGRAM
60544-extern atomic_t cachefiles_lookup_histogram[HZ];
60545-extern atomic_t cachefiles_mkdir_histogram[HZ];
60546-extern atomic_t cachefiles_create_histogram[HZ];
60547+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60548+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60549+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
60550
60551 extern int __init cachefiles_proc_init(void);
60552 extern void cachefiles_proc_cleanup(void);
60553 static inline
60554-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
60555+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
60556 {
60557 unsigned long jif = jiffies - start_jif;
60558 if (jif >= HZ)
60559 jif = HZ - 1;
60560- atomic_inc(&histogram[jif]);
60561+ atomic_inc_unchecked(&histogram[jif]);
60562 }
60563
60564 #else
60565diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
60566index dad7d95..07475af 100644
60567--- a/fs/cachefiles/namei.c
60568+++ b/fs/cachefiles/namei.c
60569@@ -312,7 +312,7 @@ try_again:
60570 /* first step is to make up a grave dentry in the graveyard */
60571 sprintf(nbuffer, "%08x%08x",
60572 (uint32_t) get_seconds(),
60573- (uint32_t) atomic_inc_return(&cache->gravecounter));
60574+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
60575
60576 /* do the multiway lock magic */
60577 trap = lock_rename(cache->graveyard, dir);
60578diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
60579index eccd339..4c1d995 100644
60580--- a/fs/cachefiles/proc.c
60581+++ b/fs/cachefiles/proc.c
60582@@ -14,9 +14,9 @@
60583 #include <linux/seq_file.h>
60584 #include "internal.h"
60585
60586-atomic_t cachefiles_lookup_histogram[HZ];
60587-atomic_t cachefiles_mkdir_histogram[HZ];
60588-atomic_t cachefiles_create_histogram[HZ];
60589+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60590+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60591+atomic_unchecked_t cachefiles_create_histogram[HZ];
60592
60593 /*
60594 * display the latency histogram
60595@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
60596 return 0;
60597 default:
60598 index = (unsigned long) v - 3;
60599- x = atomic_read(&cachefiles_lookup_histogram[index]);
60600- y = atomic_read(&cachefiles_mkdir_histogram[index]);
60601- z = atomic_read(&cachefiles_create_histogram[index]);
60602+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
60603+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
60604+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
60605 if (x == 0 && y == 0 && z == 0)
60606 return 0;
60607
60608diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
60609index 25e745b..220e604 100644
60610--- a/fs/cachefiles/rdwr.c
60611+++ b/fs/cachefiles/rdwr.c
60612@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
60613 old_fs = get_fs();
60614 set_fs(KERNEL_DS);
60615 ret = file->f_op->write(
60616- file, (const void __user *) data, len, &pos);
60617+ file, (const void __force_user *) data, len, &pos);
60618 set_fs(old_fs);
60619 kunmap(page);
60620 file_end_write(file);
60621diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
60622index c29d6ae..719b9bb 100644
60623--- a/fs/ceph/dir.c
60624+++ b/fs/ceph/dir.c
60625@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
60626 struct dentry *dentry, *last;
60627 struct ceph_dentry_info *di;
60628 int err = 0;
60629+ char d_name[DNAME_INLINE_LEN];
60630+ const unsigned char *name;
60631
60632 /* claim ref on last dentry we returned */
60633 last = fi->dentry;
60634@@ -192,7 +194,12 @@ more:
60635
60636 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
60637 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
60638- if (!dir_emit(ctx, dentry->d_name.name,
60639+ name = dentry->d_name.name;
60640+ if (name == dentry->d_iname) {
60641+ memcpy(d_name, name, dentry->d_name.len);
60642+ name = d_name;
60643+ }
60644+ if (!dir_emit(ctx, name,
60645 dentry->d_name.len,
60646 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
60647 dentry->d_inode->i_mode >> 12)) {
60648@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
60649 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
60650 struct ceph_mds_client *mdsc = fsc->mdsc;
60651 unsigned frag = fpos_frag(ctx->pos);
60652- int off = fpos_off(ctx->pos);
60653+ unsigned int off = fpos_off(ctx->pos);
60654 int err;
60655 u32 ftype;
60656 struct ceph_mds_reply_info_parsed *rinfo;
60657diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
60658index a822a6e..4644256 100644
60659--- a/fs/ceph/ioctl.c
60660+++ b/fs/ceph/ioctl.c
60661@@ -41,7 +41,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
60662 /* validate striping parameters */
60663 if ((l->object_size & ~PAGE_MASK) ||
60664 (l->stripe_unit & ~PAGE_MASK) ||
60665- (l->stripe_unit != 0 &&
60666+ ((unsigned)l->stripe_unit != 0 &&
60667 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
60668 return -EINVAL;
60669
60670diff --git a/fs/ceph/super.c b/fs/ceph/super.c
60671index f6e1237..796ffd1 100644
60672--- a/fs/ceph/super.c
60673+++ b/fs/ceph/super.c
60674@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
60675 /*
60676 * construct our own bdi so we can control readahead, etc.
60677 */
60678-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
60679+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
60680
60681 static int ceph_register_bdi(struct super_block *sb,
60682 struct ceph_fs_client *fsc)
60683@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
60684 default_backing_dev_info.ra_pages;
60685
60686 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
60687- atomic_long_inc_return(&bdi_seq));
60688+ atomic_long_inc_return_unchecked(&bdi_seq));
60689 if (!err)
60690 sb->s_bdi = &fsc->backing_dev_info;
60691 return err;
60692diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
60693index 44ec726..bcb06a3 100644
60694--- a/fs/cifs/cifs_debug.c
60695+++ b/fs/cifs/cifs_debug.c
60696@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60697
60698 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
60699 #ifdef CONFIG_CIFS_STATS2
60700- atomic_set(&totBufAllocCount, 0);
60701- atomic_set(&totSmBufAllocCount, 0);
60702+ atomic_set_unchecked(&totBufAllocCount, 0);
60703+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60704 #endif /* CONFIG_CIFS_STATS2 */
60705 spin_lock(&cifs_tcp_ses_lock);
60706 list_for_each(tmp1, &cifs_tcp_ses_list) {
60707@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60708 tcon = list_entry(tmp3,
60709 struct cifs_tcon,
60710 tcon_list);
60711- atomic_set(&tcon->num_smbs_sent, 0);
60712+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
60713 if (server->ops->clear_stats)
60714 server->ops->clear_stats(tcon);
60715 }
60716@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60717 smBufAllocCount.counter, cifs_min_small);
60718 #ifdef CONFIG_CIFS_STATS2
60719 seq_printf(m, "Total Large %d Small %d Allocations\n",
60720- atomic_read(&totBufAllocCount),
60721- atomic_read(&totSmBufAllocCount));
60722+ atomic_read_unchecked(&totBufAllocCount),
60723+ atomic_read_unchecked(&totSmBufAllocCount));
60724 #endif /* CONFIG_CIFS_STATS2 */
60725
60726 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
60727@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60728 if (tcon->need_reconnect)
60729 seq_puts(m, "\tDISCONNECTED ");
60730 seq_printf(m, "\nSMBs: %d",
60731- atomic_read(&tcon->num_smbs_sent));
60732+ atomic_read_unchecked(&tcon->num_smbs_sent));
60733 if (server->ops->print_stats)
60734 server->ops->print_stats(m, tcon);
60735 }
60736diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
60737index 889b984..fcb8431 100644
60738--- a/fs/cifs/cifsfs.c
60739+++ b/fs/cifs/cifsfs.c
60740@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
60741 */
60742 cifs_req_cachep = kmem_cache_create("cifs_request",
60743 CIFSMaxBufSize + max_hdr_size, 0,
60744- SLAB_HWCACHE_ALIGN, NULL);
60745+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60746 if (cifs_req_cachep == NULL)
60747 return -ENOMEM;
60748
60749@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60750 efficient to alloc 1 per page off the slab compared to 17K (5page)
60751 alloc of large cifs buffers even when page debugging is on */
60752 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60753- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60754+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60755 NULL);
60756 if (cifs_sm_req_cachep == NULL) {
60757 mempool_destroy(cifs_req_poolp);
60758@@ -1204,8 +1204,8 @@ init_cifs(void)
60759 atomic_set(&bufAllocCount, 0);
60760 atomic_set(&smBufAllocCount, 0);
60761 #ifdef CONFIG_CIFS_STATS2
60762- atomic_set(&totBufAllocCount, 0);
60763- atomic_set(&totSmBufAllocCount, 0);
60764+ atomic_set_unchecked(&totBufAllocCount, 0);
60765+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60766 #endif /* CONFIG_CIFS_STATS2 */
60767
60768 atomic_set(&midCount, 0);
60769diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60770index 25b8392..01e46dc 100644
60771--- a/fs/cifs/cifsglob.h
60772+++ b/fs/cifs/cifsglob.h
60773@@ -821,35 +821,35 @@ struct cifs_tcon {
60774 __u16 Flags; /* optional support bits */
60775 enum statusEnum tidStatus;
60776 #ifdef CONFIG_CIFS_STATS
60777- atomic_t num_smbs_sent;
60778+ atomic_unchecked_t num_smbs_sent;
60779 union {
60780 struct {
60781- atomic_t num_writes;
60782- atomic_t num_reads;
60783- atomic_t num_flushes;
60784- atomic_t num_oplock_brks;
60785- atomic_t num_opens;
60786- atomic_t num_closes;
60787- atomic_t num_deletes;
60788- atomic_t num_mkdirs;
60789- atomic_t num_posixopens;
60790- atomic_t num_posixmkdirs;
60791- atomic_t num_rmdirs;
60792- atomic_t num_renames;
60793- atomic_t num_t2renames;
60794- atomic_t num_ffirst;
60795- atomic_t num_fnext;
60796- atomic_t num_fclose;
60797- atomic_t num_hardlinks;
60798- atomic_t num_symlinks;
60799- atomic_t num_locks;
60800- atomic_t num_acl_get;
60801- atomic_t num_acl_set;
60802+ atomic_unchecked_t num_writes;
60803+ atomic_unchecked_t num_reads;
60804+ atomic_unchecked_t num_flushes;
60805+ atomic_unchecked_t num_oplock_brks;
60806+ atomic_unchecked_t num_opens;
60807+ atomic_unchecked_t num_closes;
60808+ atomic_unchecked_t num_deletes;
60809+ atomic_unchecked_t num_mkdirs;
60810+ atomic_unchecked_t num_posixopens;
60811+ atomic_unchecked_t num_posixmkdirs;
60812+ atomic_unchecked_t num_rmdirs;
60813+ atomic_unchecked_t num_renames;
60814+ atomic_unchecked_t num_t2renames;
60815+ atomic_unchecked_t num_ffirst;
60816+ atomic_unchecked_t num_fnext;
60817+ atomic_unchecked_t num_fclose;
60818+ atomic_unchecked_t num_hardlinks;
60819+ atomic_unchecked_t num_symlinks;
60820+ atomic_unchecked_t num_locks;
60821+ atomic_unchecked_t num_acl_get;
60822+ atomic_unchecked_t num_acl_set;
60823 } cifs_stats;
60824 #ifdef CONFIG_CIFS_SMB2
60825 struct {
60826- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60827- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60828+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60829+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60830 } smb2_stats;
60831 #endif /* CONFIG_CIFS_SMB2 */
60832 } stats;
60833@@ -1190,7 +1190,7 @@ convert_delimiter(char *path, char delim)
60834 }
60835
60836 #ifdef CONFIG_CIFS_STATS
60837-#define cifs_stats_inc atomic_inc
60838+#define cifs_stats_inc atomic_inc_unchecked
60839
60840 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60841 unsigned int bytes)
60842@@ -1557,8 +1557,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60843 /* Various Debug counters */
60844 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60845 #ifdef CONFIG_CIFS_STATS2
60846-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60847-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60848+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60849+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60850 #endif
60851 GLOBAL_EXTERN atomic_t smBufAllocCount;
60852 GLOBAL_EXTERN atomic_t midCount;
60853diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60854index 5f29354..359bc0d 100644
60855--- a/fs/cifs/file.c
60856+++ b/fs/cifs/file.c
60857@@ -2056,10 +2056,14 @@ static int cifs_writepages(struct address_space *mapping,
60858 index = mapping->writeback_index; /* Start from prev offset */
60859 end = -1;
60860 } else {
60861- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60862- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60863- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60864+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60865 range_whole = true;
60866+ index = 0;
60867+ end = ULONG_MAX;
60868+ } else {
60869+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60870+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60871+ }
60872 scanned = true;
60873 }
60874 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60875diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60876index b7415d5..3984ec0 100644
60877--- a/fs/cifs/misc.c
60878+++ b/fs/cifs/misc.c
60879@@ -170,7 +170,7 @@ cifs_buf_get(void)
60880 memset(ret_buf, 0, buf_size + 3);
60881 atomic_inc(&bufAllocCount);
60882 #ifdef CONFIG_CIFS_STATS2
60883- atomic_inc(&totBufAllocCount);
60884+ atomic_inc_unchecked(&totBufAllocCount);
60885 #endif /* CONFIG_CIFS_STATS2 */
60886 }
60887
60888@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60889 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60890 atomic_inc(&smBufAllocCount);
60891 #ifdef CONFIG_CIFS_STATS2
60892- atomic_inc(&totSmBufAllocCount);
60893+ atomic_inc_unchecked(&totSmBufAllocCount);
60894 #endif /* CONFIG_CIFS_STATS2 */
60895
60896 }
60897diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60898index 52131d8..fd79e97 100644
60899--- a/fs/cifs/smb1ops.c
60900+++ b/fs/cifs/smb1ops.c
60901@@ -626,27 +626,27 @@ static void
60902 cifs_clear_stats(struct cifs_tcon *tcon)
60903 {
60904 #ifdef CONFIG_CIFS_STATS
60905- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60906- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60907- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60908- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60909- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60910- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60911- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60912- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60913- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60914- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60915- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60916- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60917- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60918- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60919- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60920- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60921- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60922- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60923- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60924- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60925- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60926+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60927+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60928+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60929+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60930+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60931+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60932+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60933+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60934+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60935+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60936+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60937+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60938+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60939+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60940+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60941+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60942+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60943+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60944+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60945+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60946+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60947 #endif
60948 }
60949
60950@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60951 {
60952 #ifdef CONFIG_CIFS_STATS
60953 seq_printf(m, " Oplocks breaks: %d",
60954- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60955+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60956 seq_printf(m, "\nReads: %d Bytes: %llu",
60957- atomic_read(&tcon->stats.cifs_stats.num_reads),
60958+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60959 (long long)(tcon->bytes_read));
60960 seq_printf(m, "\nWrites: %d Bytes: %llu",
60961- atomic_read(&tcon->stats.cifs_stats.num_writes),
60962+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60963 (long long)(tcon->bytes_written));
60964 seq_printf(m, "\nFlushes: %d",
60965- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60966+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60967 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60968- atomic_read(&tcon->stats.cifs_stats.num_locks),
60969- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60970- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60971+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60972+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60973+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60974 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60975- atomic_read(&tcon->stats.cifs_stats.num_opens),
60976- atomic_read(&tcon->stats.cifs_stats.num_closes),
60977- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60978+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60979+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60980+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60981 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60982- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60983- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60984+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60985+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60986 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60987- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60988- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60989+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60990+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60991 seq_printf(m, "\nRenames: %d T2 Renames %d",
60992- atomic_read(&tcon->stats.cifs_stats.num_renames),
60993- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60994+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60995+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60996 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60997- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
60998- atomic_read(&tcon->stats.cifs_stats.num_fnext),
60999- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61000+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61001+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61002+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61003 #endif
61004 }
61005
61006diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61007index f522193..586121b 100644
61008--- a/fs/cifs/smb2ops.c
61009+++ b/fs/cifs/smb2ops.c
61010@@ -414,8 +414,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61011 #ifdef CONFIG_CIFS_STATS
61012 int i;
61013 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61014- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61015- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61016+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61017+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61018 }
61019 #endif
61020 }
61021@@ -455,65 +455,65 @@ static void
61022 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61023 {
61024 #ifdef CONFIG_CIFS_STATS
61025- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61026- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61027+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61028+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61029 seq_printf(m, "\nNegotiates: %d sent %d failed",
61030- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61031- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61032+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61033+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61034 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61035- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61036- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61037+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61038+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61039 seq_printf(m, "\nLogoffs: %d sent %d failed",
61040- atomic_read(&sent[SMB2_LOGOFF_HE]),
61041- atomic_read(&failed[SMB2_LOGOFF_HE]));
61042+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61043+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61044 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61045- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61046- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61047+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61048+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61049 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61050- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61051- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61052+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61053+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61054 seq_printf(m, "\nCreates: %d sent %d failed",
61055- atomic_read(&sent[SMB2_CREATE_HE]),
61056- atomic_read(&failed[SMB2_CREATE_HE]));
61057+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61058+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61059 seq_printf(m, "\nCloses: %d sent %d failed",
61060- atomic_read(&sent[SMB2_CLOSE_HE]),
61061- atomic_read(&failed[SMB2_CLOSE_HE]));
61062+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61063+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61064 seq_printf(m, "\nFlushes: %d sent %d failed",
61065- atomic_read(&sent[SMB2_FLUSH_HE]),
61066- atomic_read(&failed[SMB2_FLUSH_HE]));
61067+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61068+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61069 seq_printf(m, "\nReads: %d sent %d failed",
61070- atomic_read(&sent[SMB2_READ_HE]),
61071- atomic_read(&failed[SMB2_READ_HE]));
61072+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61073+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61074 seq_printf(m, "\nWrites: %d sent %d failed",
61075- atomic_read(&sent[SMB2_WRITE_HE]),
61076- atomic_read(&failed[SMB2_WRITE_HE]));
61077+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61078+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61079 seq_printf(m, "\nLocks: %d sent %d failed",
61080- atomic_read(&sent[SMB2_LOCK_HE]),
61081- atomic_read(&failed[SMB2_LOCK_HE]));
61082+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61083+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61084 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61085- atomic_read(&sent[SMB2_IOCTL_HE]),
61086- atomic_read(&failed[SMB2_IOCTL_HE]));
61087+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61088+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61089 seq_printf(m, "\nCancels: %d sent %d failed",
61090- atomic_read(&sent[SMB2_CANCEL_HE]),
61091- atomic_read(&failed[SMB2_CANCEL_HE]));
61092+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61093+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61094 seq_printf(m, "\nEchos: %d sent %d failed",
61095- atomic_read(&sent[SMB2_ECHO_HE]),
61096- atomic_read(&failed[SMB2_ECHO_HE]));
61097+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61098+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61099 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61100- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61101- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61102+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61103+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61104 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61105- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61106- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61107+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61108+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61109 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61110- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61111- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61112+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61113+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61114 seq_printf(m, "\nSetInfos: %d sent %d failed",
61115- atomic_read(&sent[SMB2_SET_INFO_HE]),
61116- atomic_read(&failed[SMB2_SET_INFO_HE]));
61117+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61118+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61119 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61120- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61121- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61122+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61123+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61124 #endif
61125 }
61126
61127diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61128index 74b3a66..0c709f3 100644
61129--- a/fs/cifs/smb2pdu.c
61130+++ b/fs/cifs/smb2pdu.c
61131@@ -2143,8 +2143,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61132 default:
61133 cifs_dbg(VFS, "info level %u isn't supported\n",
61134 srch_inf->info_level);
61135- rc = -EINVAL;
61136- goto qdir_exit;
61137+ return -EINVAL;
61138 }
61139
61140 req->FileIndex = cpu_to_le32(index);
61141diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61142index 278f8fd..e69c52d 100644
61143--- a/fs/coda/cache.c
61144+++ b/fs/coda/cache.c
61145@@ -24,7 +24,7 @@
61146 #include "coda_linux.h"
61147 #include "coda_cache.h"
61148
61149-static atomic_t permission_epoch = ATOMIC_INIT(0);
61150+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61151
61152 /* replace or extend an acl cache hit */
61153 void coda_cache_enter(struct inode *inode, int mask)
61154@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61155 struct coda_inode_info *cii = ITOC(inode);
61156
61157 spin_lock(&cii->c_lock);
61158- cii->c_cached_epoch = atomic_read(&permission_epoch);
61159+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61160 if (!uid_eq(cii->c_uid, current_fsuid())) {
61161 cii->c_uid = current_fsuid();
61162 cii->c_cached_perm = mask;
61163@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61164 {
61165 struct coda_inode_info *cii = ITOC(inode);
61166 spin_lock(&cii->c_lock);
61167- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61168+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61169 spin_unlock(&cii->c_lock);
61170 }
61171
61172 /* remove all acl caches */
61173 void coda_cache_clear_all(struct super_block *sb)
61174 {
61175- atomic_inc(&permission_epoch);
61176+ atomic_inc_unchecked(&permission_epoch);
61177 }
61178
61179
61180@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61181 spin_lock(&cii->c_lock);
61182 hit = (mask & cii->c_cached_perm) == mask &&
61183 uid_eq(cii->c_uid, current_fsuid()) &&
61184- cii->c_cached_epoch == atomic_read(&permission_epoch);
61185+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61186 spin_unlock(&cii->c_lock);
61187
61188 return hit;
61189diff --git a/fs/compat.c b/fs/compat.c
61190index 66d3d3c..9c10175 100644
61191--- a/fs/compat.c
61192+++ b/fs/compat.c
61193@@ -54,7 +54,7 @@
61194 #include <asm/ioctls.h>
61195 #include "internal.h"
61196
61197-int compat_log = 1;
61198+int compat_log = 0;
61199
61200 int compat_printk(const char *fmt, ...)
61201 {
61202@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61203
61204 set_fs(KERNEL_DS);
61205 /* The __user pointer cast is valid because of the set_fs() */
61206- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61207+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61208 set_fs(oldfs);
61209 /* truncating is ok because it's a user address */
61210 if (!ret)
61211@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61212 goto out;
61213
61214 ret = -EINVAL;
61215- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61216+ if (nr_segs > UIO_MAXIOV)
61217 goto out;
61218 if (nr_segs > fast_segs) {
61219 ret = -ENOMEM;
61220@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61221 struct compat_readdir_callback {
61222 struct dir_context ctx;
61223 struct compat_old_linux_dirent __user *dirent;
61224+ struct file * file;
61225 int result;
61226 };
61227
61228@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61229 buf->result = -EOVERFLOW;
61230 return -EOVERFLOW;
61231 }
61232+
61233+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61234+ return 0;
61235+
61236 buf->result++;
61237 dirent = buf->dirent;
61238 if (!access_ok(VERIFY_WRITE, dirent,
61239@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61240 if (!f.file)
61241 return -EBADF;
61242
61243+ buf.file = f.file;
61244 error = iterate_dir(f.file, &buf.ctx);
61245 if (buf.result)
61246 error = buf.result;
61247@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61248 struct dir_context ctx;
61249 struct compat_linux_dirent __user *current_dir;
61250 struct compat_linux_dirent __user *previous;
61251+ struct file * file;
61252 int count;
61253 int error;
61254 };
61255@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61256 buf->error = -EOVERFLOW;
61257 return -EOVERFLOW;
61258 }
61259+
61260+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61261+ return 0;
61262+
61263 dirent = buf->previous;
61264 if (dirent) {
61265 if (__put_user(offset, &dirent->d_off))
61266@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61267 if (!f.file)
61268 return -EBADF;
61269
61270+ buf.file = f.file;
61271 error = iterate_dir(f.file, &buf.ctx);
61272 if (error >= 0)
61273 error = buf.error;
61274@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61275 struct dir_context ctx;
61276 struct linux_dirent64 __user *current_dir;
61277 struct linux_dirent64 __user *previous;
61278+ struct file * file;
61279 int count;
61280 int error;
61281 };
61282@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61283 buf->error = -EINVAL; /* only used if we fail.. */
61284 if (reclen > buf->count)
61285 return -EINVAL;
61286+
61287+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61288+ return 0;
61289+
61290 dirent = buf->previous;
61291
61292 if (dirent) {
61293@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61294 if (!f.file)
61295 return -EBADF;
61296
61297+ buf.file = f.file;
61298 error = iterate_dir(f.file, &buf.ctx);
61299 if (error >= 0)
61300 error = buf.error;
61301diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61302index 4d24d17..4f8c09e 100644
61303--- a/fs/compat_binfmt_elf.c
61304+++ b/fs/compat_binfmt_elf.c
61305@@ -30,11 +30,13 @@
61306 #undef elf_phdr
61307 #undef elf_shdr
61308 #undef elf_note
61309+#undef elf_dyn
61310 #undef elf_addr_t
61311 #define elfhdr elf32_hdr
61312 #define elf_phdr elf32_phdr
61313 #define elf_shdr elf32_shdr
61314 #define elf_note elf32_note
61315+#define elf_dyn Elf32_Dyn
61316 #define elf_addr_t Elf32_Addr
61317
61318 /*
61319diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61320index afec645..9c65620 100644
61321--- a/fs/compat_ioctl.c
61322+++ b/fs/compat_ioctl.c
61323@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61324 return -EFAULT;
61325 if (__get_user(udata, &ss32->iomem_base))
61326 return -EFAULT;
61327- ss.iomem_base = compat_ptr(udata);
61328+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61329 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61330 __get_user(ss.port_high, &ss32->port_high))
61331 return -EFAULT;
61332@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61333 for (i = 0; i < nmsgs; i++) {
61334 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61335 return -EFAULT;
61336- if (get_user(datap, &umsgs[i].buf) ||
61337- put_user(compat_ptr(datap), &tmsgs[i].buf))
61338+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61339+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61340 return -EFAULT;
61341 }
61342 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61343@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61344 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61345 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61346 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61347- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61348+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61349 return -EFAULT;
61350
61351 return ioctl_preallocate(file, p);
61352@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61353 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61354 {
61355 unsigned int a, b;
61356- a = *(unsigned int *)p;
61357- b = *(unsigned int *)q;
61358+ a = *(const unsigned int *)p;
61359+ b = *(const unsigned int *)q;
61360 if (a > b)
61361 return 1;
61362 if (a < b)
61363diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61364index 668dcab..daebcd6 100644
61365--- a/fs/configfs/dir.c
61366+++ b/fs/configfs/dir.c
61367@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61368 }
61369 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61370 struct configfs_dirent *next;
61371- const char *name;
61372+ const unsigned char * name;
61373+ char d_name[sizeof(next->s_dentry->d_iname)];
61374 int len;
61375 struct inode *inode = NULL;
61376
61377@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61378 continue;
61379
61380 name = configfs_get_name(next);
61381- len = strlen(name);
61382+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61383+ len = next->s_dentry->d_name.len;
61384+ memcpy(d_name, name, len);
61385+ name = d_name;
61386+ } else
61387+ len = strlen(name);
61388
61389 /*
61390 * We'll have a dentry and an inode for
61391diff --git a/fs/coredump.c b/fs/coredump.c
61392index a93f7e6..d58bcbe 100644
61393--- a/fs/coredump.c
61394+++ b/fs/coredump.c
61395@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61396 struct pipe_inode_info *pipe = file->private_data;
61397
61398 pipe_lock(pipe);
61399- pipe->readers++;
61400- pipe->writers--;
61401+ atomic_inc(&pipe->readers);
61402+ atomic_dec(&pipe->writers);
61403 wake_up_interruptible_sync(&pipe->wait);
61404 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61405 pipe_unlock(pipe);
61406@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61407 * We actually want wait_event_freezable() but then we need
61408 * to clear TIF_SIGPENDING and improve dump_interrupted().
61409 */
61410- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61411+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61412
61413 pipe_lock(pipe);
61414- pipe->readers--;
61415- pipe->writers++;
61416+ atomic_dec(&pipe->readers);
61417+ atomic_inc(&pipe->writers);
61418 pipe_unlock(pipe);
61419 }
61420
61421@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
61422 struct files_struct *displaced;
61423 bool need_nonrelative = false;
61424 bool core_dumped = false;
61425- static atomic_t core_dump_count = ATOMIC_INIT(0);
61426+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61427+ long signr = siginfo->si_signo;
61428+ int dumpable;
61429 struct coredump_params cprm = {
61430 .siginfo = siginfo,
61431 .regs = signal_pt_regs(),
61432@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
61433 .mm_flags = mm->flags,
61434 };
61435
61436- audit_core_dumps(siginfo->si_signo);
61437+ audit_core_dumps(signr);
61438+
61439+ dumpable = __get_dumpable(cprm.mm_flags);
61440+
61441+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61442+ gr_handle_brute_attach(dumpable);
61443
61444 binfmt = mm->binfmt;
61445 if (!binfmt || !binfmt->core_dump)
61446 goto fail;
61447- if (!__get_dumpable(cprm.mm_flags))
61448+ if (!dumpable)
61449 goto fail;
61450
61451 cred = prepare_creds();
61452@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
61453 need_nonrelative = true;
61454 }
61455
61456- retval = coredump_wait(siginfo->si_signo, &core_state);
61457+ retval = coredump_wait(signr, &core_state);
61458 if (retval < 0)
61459 goto fail_creds;
61460
61461@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
61462 }
61463 cprm.limit = RLIM_INFINITY;
61464
61465- dump_count = atomic_inc_return(&core_dump_count);
61466+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
61467 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
61468 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
61469 task_tgid_vnr(current), current->comm);
61470@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
61471 } else {
61472 struct inode *inode;
61473
61474+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
61475+
61476 if (cprm.limit < binfmt->min_coredump)
61477 goto fail_unlock;
61478
61479@@ -673,7 +682,7 @@ close_fail:
61480 filp_close(cprm.file, NULL);
61481 fail_dropcount:
61482 if (ispipe)
61483- atomic_dec(&core_dump_count);
61484+ atomic_dec_unchecked(&core_dump_count);
61485 fail_unlock:
61486 kfree(cn.corename);
61487 coredump_finish(mm, core_dumped);
61488@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
61489 struct file *file = cprm->file;
61490 loff_t pos = file->f_pos;
61491 ssize_t n;
61492+
61493+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
61494 if (cprm->written + nr > cprm->limit)
61495 return 0;
61496 while (nr) {
61497diff --git a/fs/dcache.c b/fs/dcache.c
61498index cb25a1a..c557cb6 100644
61499--- a/fs/dcache.c
61500+++ b/fs/dcache.c
61501@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
61502 * dentry_iput drops the locks, at which point nobody (except
61503 * transient RCU lookups) can reach this dentry.
61504 */
61505- BUG_ON((int)dentry->d_lockref.count > 0);
61506+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
61507 this_cpu_dec(nr_dentry);
61508 if (dentry->d_op && dentry->d_op->d_release)
61509 dentry->d_op->d_release(dentry);
61510@@ -531,7 +531,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
61511 struct dentry *parent = dentry->d_parent;
61512 if (IS_ROOT(dentry))
61513 return NULL;
61514- if (unlikely((int)dentry->d_lockref.count < 0))
61515+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
61516 return NULL;
61517 if (likely(spin_trylock(&parent->d_lock)))
61518 return parent;
61519@@ -608,7 +608,7 @@ repeat:
61520 dentry->d_flags |= DCACHE_REFERENCED;
61521 dentry_lru_add(dentry);
61522
61523- dentry->d_lockref.count--;
61524+ __lockref_dec(&dentry->d_lockref);
61525 spin_unlock(&dentry->d_lock);
61526 return;
61527
61528@@ -663,7 +663,7 @@ int d_invalidate(struct dentry * dentry)
61529 * We also need to leave mountpoints alone,
61530 * directory or not.
61531 */
61532- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
61533+ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) {
61534 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
61535 spin_unlock(&dentry->d_lock);
61536 return -EBUSY;
61537@@ -679,7 +679,7 @@ EXPORT_SYMBOL(d_invalidate);
61538 /* This must be called with d_lock held */
61539 static inline void __dget_dlock(struct dentry *dentry)
61540 {
61541- dentry->d_lockref.count++;
61542+ __lockref_inc(&dentry->d_lockref);
61543 }
61544
61545 static inline void __dget(struct dentry *dentry)
61546@@ -720,8 +720,8 @@ repeat:
61547 goto repeat;
61548 }
61549 rcu_read_unlock();
61550- BUG_ON(!ret->d_lockref.count);
61551- ret->d_lockref.count++;
61552+ BUG_ON(!__lockref_read(&ret->d_lockref));
61553+ __lockref_inc(&ret->d_lockref);
61554 spin_unlock(&ret->d_lock);
61555 return ret;
61556 }
61557@@ -798,7 +798,7 @@ restart:
61558 spin_lock(&inode->i_lock);
61559 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
61560 spin_lock(&dentry->d_lock);
61561- if (!dentry->d_lockref.count) {
61562+ if (!__lockref_read(&dentry->d_lockref)) {
61563 /*
61564 * inform the fs via d_prune that this dentry
61565 * is about to be unhashed and destroyed.
61566@@ -841,7 +841,7 @@ static void shrink_dentry_list(struct list_head *list)
61567 * We found an inuse dentry which was not removed from
61568 * the LRU because of laziness during lookup. Do not free it.
61569 */
61570- if ((int)dentry->d_lockref.count > 0) {
61571+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
61572 spin_unlock(&dentry->d_lock);
61573 if (parent)
61574 spin_unlock(&parent->d_lock);
61575@@ -879,8 +879,8 @@ static void shrink_dentry_list(struct list_head *list)
61576 dentry = parent;
61577 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
61578 parent = lock_parent(dentry);
61579- if (dentry->d_lockref.count != 1) {
61580- dentry->d_lockref.count--;
61581+ if (__lockref_read(&dentry->d_lockref) != 1) {
61582+ __lockref_inc(&dentry->d_lockref);
61583 spin_unlock(&dentry->d_lock);
61584 if (parent)
61585 spin_unlock(&parent->d_lock);
61586@@ -920,7 +920,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
61587 * counts, just remove them from the LRU. Otherwise give them
61588 * another pass through the LRU.
61589 */
61590- if (dentry->d_lockref.count) {
61591+ if (__lockref_read(&dentry->d_lockref) > 0) {
61592 d_lru_isolate(dentry);
61593 spin_unlock(&dentry->d_lock);
61594 return LRU_REMOVED;
61595@@ -1149,6 +1149,7 @@ out_unlock:
61596 return;
61597
61598 rename_retry:
61599+ done_seqretry(&rename_lock, seq);
61600 if (!retry)
61601 return;
61602 seq = 1;
61603@@ -1255,7 +1256,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
61604 } else {
61605 if (dentry->d_flags & DCACHE_LRU_LIST)
61606 d_lru_del(dentry);
61607- if (!dentry->d_lockref.count) {
61608+ if (!__lockref_read(&dentry->d_lockref)) {
61609 d_shrink_add(dentry, &data->dispose);
61610 data->found++;
61611 }
61612@@ -1303,7 +1304,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61613 return D_WALK_CONTINUE;
61614
61615 /* root with refcount 1 is fine */
61616- if (dentry == _data && dentry->d_lockref.count == 1)
61617+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
61618 return D_WALK_CONTINUE;
61619
61620 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
61621@@ -1312,7 +1313,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61622 dentry->d_inode ?
61623 dentry->d_inode->i_ino : 0UL,
61624 dentry,
61625- dentry->d_lockref.count,
61626+ __lockref_read(&dentry->d_lockref),
61627 dentry->d_sb->s_type->name,
61628 dentry->d_sb->s_id);
61629 WARN_ON(1);
61630@@ -1438,7 +1439,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61631 */
61632 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
61633 if (name->len > DNAME_INLINE_LEN-1) {
61634- dname = kmalloc(name->len + 1, GFP_KERNEL);
61635+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
61636 if (!dname) {
61637 kmem_cache_free(dentry_cache, dentry);
61638 return NULL;
61639@@ -1456,7 +1457,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61640 smp_wmb();
61641 dentry->d_name.name = dname;
61642
61643- dentry->d_lockref.count = 1;
61644+ __lockref_set(&dentry->d_lockref, 1);
61645 dentry->d_flags = 0;
61646 spin_lock_init(&dentry->d_lock);
61647 seqcount_init(&dentry->d_seq);
61648@@ -2196,7 +2197,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
61649 goto next;
61650 }
61651
61652- dentry->d_lockref.count++;
61653+ __lockref_inc(&dentry->d_lockref);
61654 found = dentry;
61655 spin_unlock(&dentry->d_lock);
61656 break;
61657@@ -2295,7 +2296,7 @@ again:
61658 spin_lock(&dentry->d_lock);
61659 inode = dentry->d_inode;
61660 isdir = S_ISDIR(inode->i_mode);
61661- if (dentry->d_lockref.count == 1) {
61662+ if (__lockref_read(&dentry->d_lockref) == 1) {
61663 if (!spin_trylock(&inode->i_lock)) {
61664 spin_unlock(&dentry->d_lock);
61665 cpu_relax();
61666@@ -2675,11 +2676,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
61667 if (!IS_ROOT(new)) {
61668 spin_unlock(&inode->i_lock);
61669 dput(new);
61670+ iput(inode);
61671 return ERR_PTR(-EIO);
61672 }
61673 if (d_ancestor(new, dentry)) {
61674 spin_unlock(&inode->i_lock);
61675 dput(new);
61676+ iput(inode);
61677 return ERR_PTR(-EIO);
61678 }
61679 write_seqlock(&rename_lock);
61680@@ -3300,7 +3303,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
61681
61682 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
61683 dentry->d_flags |= DCACHE_GENOCIDE;
61684- dentry->d_lockref.count--;
61685+ __lockref_dec(&dentry->d_lockref);
61686 }
61687 }
61688 return D_WALK_CONTINUE;
61689@@ -3416,7 +3419,8 @@ void __init vfs_caches_init(unsigned long mempages)
61690 mempages -= reserve;
61691
61692 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
61693- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
61694+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
61695+ SLAB_NO_SANITIZE, NULL);
61696
61697 dcache_init();
61698 inode_init();
61699diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
61700index 1e3b99d..6512101 100644
61701--- a/fs/debugfs/inode.c
61702+++ b/fs/debugfs/inode.c
61703@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
61704 */
61705 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61706 {
61707+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61708+ return __create_file(name, S_IFDIR | S_IRWXU,
61709+#else
61710 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
61711+#endif
61712 parent, NULL, NULL);
61713 }
61714 EXPORT_SYMBOL_GPL(debugfs_create_dir);
61715diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
61716index 57ee4c5..ecb13b0 100644
61717--- a/fs/ecryptfs/inode.c
61718+++ b/fs/ecryptfs/inode.c
61719@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
61720 old_fs = get_fs();
61721 set_fs(get_ds());
61722 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
61723- (char __user *)lower_buf,
61724+ (char __force_user *)lower_buf,
61725 PATH_MAX);
61726 set_fs(old_fs);
61727 if (rc < 0)
61728diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
61729index e4141f2..d8263e8 100644
61730--- a/fs/ecryptfs/miscdev.c
61731+++ b/fs/ecryptfs/miscdev.c
61732@@ -304,7 +304,7 @@ check_list:
61733 goto out_unlock_msg_ctx;
61734 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
61735 if (msg_ctx->msg) {
61736- if (copy_to_user(&buf[i], packet_length, packet_length_size))
61737+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
61738 goto out_unlock_msg_ctx;
61739 i += packet_length_size;
61740 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
61741diff --git a/fs/exec.c b/fs/exec.c
61742index a2b42a9..1e924b3 100644
61743--- a/fs/exec.c
61744+++ b/fs/exec.c
61745@@ -56,8 +56,20 @@
61746 #include <linux/pipe_fs_i.h>
61747 #include <linux/oom.h>
61748 #include <linux/compat.h>
61749+#include <linux/random.h>
61750+#include <linux/seq_file.h>
61751+#include <linux/coredump.h>
61752+#include <linux/mman.h>
61753+
61754+#ifdef CONFIG_PAX_REFCOUNT
61755+#include <linux/kallsyms.h>
61756+#include <linux/kdebug.h>
61757+#endif
61758+
61759+#include <trace/events/fs.h>
61760
61761 #include <asm/uaccess.h>
61762+#include <asm/sections.h>
61763 #include <asm/mmu_context.h>
61764 #include <asm/tlb.h>
61765
61766@@ -66,19 +78,34 @@
61767
61768 #include <trace/events/sched.h>
61769
61770+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61771+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61772+{
61773+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61774+}
61775+#endif
61776+
61777+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61778+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61779+EXPORT_SYMBOL(pax_set_initial_flags_func);
61780+#endif
61781+
61782 int suid_dumpable = 0;
61783
61784 static LIST_HEAD(formats);
61785 static DEFINE_RWLOCK(binfmt_lock);
61786
61787+extern int gr_process_kernel_exec_ban(void);
61788+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61789+
61790 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61791 {
61792 BUG_ON(!fmt);
61793 if (WARN_ON(!fmt->load_binary))
61794 return;
61795 write_lock(&binfmt_lock);
61796- insert ? list_add(&fmt->lh, &formats) :
61797- list_add_tail(&fmt->lh, &formats);
61798+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61799+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61800 write_unlock(&binfmt_lock);
61801 }
61802
61803@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61804 void unregister_binfmt(struct linux_binfmt * fmt)
61805 {
61806 write_lock(&binfmt_lock);
61807- list_del(&fmt->lh);
61808+ pax_list_del((struct list_head *)&fmt->lh);
61809 write_unlock(&binfmt_lock);
61810 }
61811
61812@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61813 int write)
61814 {
61815 struct page *page;
61816- int ret;
61817
61818-#ifdef CONFIG_STACK_GROWSUP
61819- if (write) {
61820- ret = expand_downwards(bprm->vma, pos);
61821- if (ret < 0)
61822- return NULL;
61823- }
61824-#endif
61825- ret = get_user_pages(current, bprm->mm, pos,
61826- 1, write, 1, &page, NULL);
61827- if (ret <= 0)
61828+ if (0 > expand_downwards(bprm->vma, pos))
61829+ return NULL;
61830+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61831 return NULL;
61832
61833 if (write) {
61834@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61835 if (size <= ARG_MAX)
61836 return page;
61837
61838+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61839+ // only allow 512KB for argv+env on suid/sgid binaries
61840+ // to prevent easy ASLR exhaustion
61841+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61842+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61843+ (size > (512 * 1024))) {
61844+ put_page(page);
61845+ return NULL;
61846+ }
61847+#endif
61848+
61849 /*
61850 * Limit to 1/4-th the stack size for the argv+env strings.
61851 * This ensures that:
61852@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61853 vma->vm_end = STACK_TOP_MAX;
61854 vma->vm_start = vma->vm_end - PAGE_SIZE;
61855 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61856+
61857+#ifdef CONFIG_PAX_SEGMEXEC
61858+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61859+#endif
61860+
61861 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61862 INIT_LIST_HEAD(&vma->anon_vma_chain);
61863
61864@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61865 mm->stack_vm = mm->total_vm = 1;
61866 up_write(&mm->mmap_sem);
61867 bprm->p = vma->vm_end - sizeof(void *);
61868+
61869+#ifdef CONFIG_PAX_RANDUSTACK
61870+ if (randomize_va_space)
61871+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61872+#endif
61873+
61874 return 0;
61875 err:
61876 up_write(&mm->mmap_sem);
61877@@ -395,7 +436,7 @@ struct user_arg_ptr {
61878 } ptr;
61879 };
61880
61881-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61882+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61883 {
61884 const char __user *native;
61885
61886@@ -404,14 +445,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61887 compat_uptr_t compat;
61888
61889 if (get_user(compat, argv.ptr.compat + nr))
61890- return ERR_PTR(-EFAULT);
61891+ return (const char __force_user *)ERR_PTR(-EFAULT);
61892
61893 return compat_ptr(compat);
61894 }
61895 #endif
61896
61897 if (get_user(native, argv.ptr.native + nr))
61898- return ERR_PTR(-EFAULT);
61899+ return (const char __force_user *)ERR_PTR(-EFAULT);
61900
61901 return native;
61902 }
61903@@ -430,7 +471,7 @@ static int count(struct user_arg_ptr argv, int max)
61904 if (!p)
61905 break;
61906
61907- if (IS_ERR(p))
61908+ if (IS_ERR((const char __force_kernel *)p))
61909 return -EFAULT;
61910
61911 if (i >= max)
61912@@ -465,7 +506,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61913
61914 ret = -EFAULT;
61915 str = get_user_arg_ptr(argv, argc);
61916- if (IS_ERR(str))
61917+ if (IS_ERR((const char __force_kernel *)str))
61918 goto out;
61919
61920 len = strnlen_user(str, MAX_ARG_STRLEN);
61921@@ -547,7 +588,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61922 int r;
61923 mm_segment_t oldfs = get_fs();
61924 struct user_arg_ptr argv = {
61925- .ptr.native = (const char __user *const __user *)__argv,
61926+ .ptr.native = (const char __user * const __force_user *)__argv,
61927 };
61928
61929 set_fs(KERNEL_DS);
61930@@ -582,7 +623,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61931 unsigned long new_end = old_end - shift;
61932 struct mmu_gather tlb;
61933
61934- BUG_ON(new_start > new_end);
61935+ if (new_start >= new_end || new_start < mmap_min_addr)
61936+ return -ENOMEM;
61937
61938 /*
61939 * ensure there are no vmas between where we want to go
61940@@ -591,6 +633,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61941 if (vma != find_vma(mm, new_start))
61942 return -EFAULT;
61943
61944+#ifdef CONFIG_PAX_SEGMEXEC
61945+ BUG_ON(pax_find_mirror_vma(vma));
61946+#endif
61947+
61948 /*
61949 * cover the whole range: [new_start, old_end)
61950 */
61951@@ -671,10 +717,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61952 stack_top = arch_align_stack(stack_top);
61953 stack_top = PAGE_ALIGN(stack_top);
61954
61955- if (unlikely(stack_top < mmap_min_addr) ||
61956- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61957- return -ENOMEM;
61958-
61959 stack_shift = vma->vm_end - stack_top;
61960
61961 bprm->p -= stack_shift;
61962@@ -686,8 +728,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61963 bprm->exec -= stack_shift;
61964
61965 down_write(&mm->mmap_sem);
61966+
61967+ /* Move stack pages down in memory. */
61968+ if (stack_shift) {
61969+ ret = shift_arg_pages(vma, stack_shift);
61970+ if (ret)
61971+ goto out_unlock;
61972+ }
61973+
61974 vm_flags = VM_STACK_FLAGS;
61975
61976+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61977+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61978+ vm_flags &= ~VM_EXEC;
61979+
61980+#ifdef CONFIG_PAX_MPROTECT
61981+ if (mm->pax_flags & MF_PAX_MPROTECT)
61982+ vm_flags &= ~VM_MAYEXEC;
61983+#endif
61984+
61985+ }
61986+#endif
61987+
61988 /*
61989 * Adjust stack execute permissions; explicitly enable for
61990 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61991@@ -706,13 +768,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61992 goto out_unlock;
61993 BUG_ON(prev != vma);
61994
61995- /* Move stack pages down in memory. */
61996- if (stack_shift) {
61997- ret = shift_arg_pages(vma, stack_shift);
61998- if (ret)
61999- goto out_unlock;
62000- }
62001-
62002 /* mprotect_fixup is overkill to remove the temporary stack flags */
62003 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62004
62005@@ -736,6 +791,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62006 #endif
62007 current->mm->start_stack = bprm->p;
62008 ret = expand_stack(vma, stack_base);
62009+
62010+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62011+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62012+ unsigned long size;
62013+ vm_flags_t vm_flags;
62014+
62015+ size = STACK_TOP - vma->vm_end;
62016+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62017+
62018+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62019+
62020+#ifdef CONFIG_X86
62021+ if (!ret) {
62022+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62023+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62024+ }
62025+#endif
62026+
62027+ }
62028+#endif
62029+
62030 if (ret)
62031 ret = -EFAULT;
62032
62033@@ -771,6 +847,8 @@ static struct file *do_open_exec(struct filename *name)
62034
62035 fsnotify_open(file);
62036
62037+ trace_open_exec(name->name);
62038+
62039 err = deny_write_access(file);
62040 if (err)
62041 goto exit;
62042@@ -800,7 +878,7 @@ int kernel_read(struct file *file, loff_t offset,
62043 old_fs = get_fs();
62044 set_fs(get_ds());
62045 /* The cast to a user pointer is valid due to the set_fs() */
62046- result = vfs_read(file, (void __user *)addr, count, &pos);
62047+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62048 set_fs(old_fs);
62049 return result;
62050 }
62051@@ -845,6 +923,7 @@ static int exec_mmap(struct mm_struct *mm)
62052 tsk->mm = mm;
62053 tsk->active_mm = mm;
62054 activate_mm(active_mm, mm);
62055+ populate_stack();
62056 tsk->mm->vmacache_seqnum = 0;
62057 vmacache_flush(tsk);
62058 task_unlock(tsk);
62059@@ -1243,7 +1322,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62060 }
62061 rcu_read_unlock();
62062
62063- if (p->fs->users > n_fs)
62064+ if (atomic_read(&p->fs->users) > n_fs)
62065 bprm->unsafe |= LSM_UNSAFE_SHARE;
62066 else
62067 p->fs->in_exec = 1;
62068@@ -1419,6 +1498,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62069 return ret;
62070 }
62071
62072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62073+static DEFINE_PER_CPU(u64, exec_counter);
62074+static int __init init_exec_counters(void)
62075+{
62076+ unsigned int cpu;
62077+
62078+ for_each_possible_cpu(cpu) {
62079+ per_cpu(exec_counter, cpu) = (u64)cpu;
62080+ }
62081+
62082+ return 0;
62083+}
62084+early_initcall(init_exec_counters);
62085+static inline void increment_exec_counter(void)
62086+{
62087+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62088+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62089+}
62090+#else
62091+static inline void increment_exec_counter(void) {}
62092+#endif
62093+
62094+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62095+ struct user_arg_ptr argv);
62096+
62097 /*
62098 * sys_execve() executes a new program.
62099 */
62100@@ -1426,6 +1530,11 @@ static int do_execve_common(struct filename *filename,
62101 struct user_arg_ptr argv,
62102 struct user_arg_ptr envp)
62103 {
62104+#ifdef CONFIG_GRKERNSEC
62105+ struct file *old_exec_file;
62106+ struct acl_subject_label *old_acl;
62107+ struct rlimit old_rlim[RLIM_NLIMITS];
62108+#endif
62109 struct linux_binprm *bprm;
62110 struct file *file;
62111 struct files_struct *displaced;
62112@@ -1434,6 +1543,8 @@ static int do_execve_common(struct filename *filename,
62113 if (IS_ERR(filename))
62114 return PTR_ERR(filename);
62115
62116+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62117+
62118 /*
62119 * We move the actual failure in case of RLIMIT_NPROC excess from
62120 * set*uid() to execve() because too many poorly written programs
62121@@ -1471,11 +1582,21 @@ static int do_execve_common(struct filename *filename,
62122 if (IS_ERR(file))
62123 goto out_unmark;
62124
62125+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62126+ retval = -EPERM;
62127+ goto out_unmark;
62128+ }
62129+
62130 sched_exec();
62131
62132 bprm->file = file;
62133 bprm->filename = bprm->interp = filename->name;
62134
62135+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62136+ retval = -EACCES;
62137+ goto out_unmark;
62138+ }
62139+
62140 retval = bprm_mm_init(bprm);
62141 if (retval)
62142 goto out_unmark;
62143@@ -1492,24 +1613,70 @@ static int do_execve_common(struct filename *filename,
62144 if (retval < 0)
62145 goto out;
62146
62147+#ifdef CONFIG_GRKERNSEC
62148+ old_acl = current->acl;
62149+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62150+ old_exec_file = current->exec_file;
62151+ get_file(file);
62152+ current->exec_file = file;
62153+#endif
62154+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62155+ /* limit suid stack to 8MB
62156+ * we saved the old limits above and will restore them if this exec fails
62157+ */
62158+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62159+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62160+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62161+#endif
62162+
62163+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62164+ retval = -EPERM;
62165+ goto out_fail;
62166+ }
62167+
62168+ if (!gr_tpe_allow(file)) {
62169+ retval = -EACCES;
62170+ goto out_fail;
62171+ }
62172+
62173+ if (gr_check_crash_exec(file)) {
62174+ retval = -EACCES;
62175+ goto out_fail;
62176+ }
62177+
62178+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62179+ bprm->unsafe);
62180+ if (retval < 0)
62181+ goto out_fail;
62182+
62183 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62184 if (retval < 0)
62185- goto out;
62186+ goto out_fail;
62187
62188 bprm->exec = bprm->p;
62189 retval = copy_strings(bprm->envc, envp, bprm);
62190 if (retval < 0)
62191- goto out;
62192+ goto out_fail;
62193
62194 retval = copy_strings(bprm->argc, argv, bprm);
62195 if (retval < 0)
62196- goto out;
62197+ goto out_fail;
62198+
62199+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62200+
62201+ gr_handle_exec_args(bprm, argv);
62202
62203 retval = exec_binprm(bprm);
62204 if (retval < 0)
62205- goto out;
62206+ goto out_fail;
62207+#ifdef CONFIG_GRKERNSEC
62208+ if (old_exec_file)
62209+ fput(old_exec_file);
62210+#endif
62211
62212 /* execve succeeded */
62213+
62214+ increment_exec_counter();
62215 current->fs->in_exec = 0;
62216 current->in_execve = 0;
62217 acct_update_integrals(current);
62218@@ -1520,6 +1687,14 @@ static int do_execve_common(struct filename *filename,
62219 put_files_struct(displaced);
62220 return retval;
62221
62222+out_fail:
62223+#ifdef CONFIG_GRKERNSEC
62224+ current->acl = old_acl;
62225+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62226+ fput(current->exec_file);
62227+ current->exec_file = old_exec_file;
62228+#endif
62229+
62230 out:
62231 if (bprm->mm) {
62232 acct_arg_size(bprm, 0);
62233@@ -1611,3 +1786,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62234 return compat_do_execve(getname(filename), argv, envp);
62235 }
62236 #endif
62237+
62238+int pax_check_flags(unsigned long *flags)
62239+{
62240+ int retval = 0;
62241+
62242+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62243+ if (*flags & MF_PAX_SEGMEXEC)
62244+ {
62245+ *flags &= ~MF_PAX_SEGMEXEC;
62246+ retval = -EINVAL;
62247+ }
62248+#endif
62249+
62250+ if ((*flags & MF_PAX_PAGEEXEC)
62251+
62252+#ifdef CONFIG_PAX_PAGEEXEC
62253+ && (*flags & MF_PAX_SEGMEXEC)
62254+#endif
62255+
62256+ )
62257+ {
62258+ *flags &= ~MF_PAX_PAGEEXEC;
62259+ retval = -EINVAL;
62260+ }
62261+
62262+ if ((*flags & MF_PAX_MPROTECT)
62263+
62264+#ifdef CONFIG_PAX_MPROTECT
62265+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62266+#endif
62267+
62268+ )
62269+ {
62270+ *flags &= ~MF_PAX_MPROTECT;
62271+ retval = -EINVAL;
62272+ }
62273+
62274+ if ((*flags & MF_PAX_EMUTRAMP)
62275+
62276+#ifdef CONFIG_PAX_EMUTRAMP
62277+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62278+#endif
62279+
62280+ )
62281+ {
62282+ *flags &= ~MF_PAX_EMUTRAMP;
62283+ retval = -EINVAL;
62284+ }
62285+
62286+ return retval;
62287+}
62288+
62289+EXPORT_SYMBOL(pax_check_flags);
62290+
62291+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62292+char *pax_get_path(const struct path *path, char *buf, int buflen)
62293+{
62294+ char *pathname = d_path(path, buf, buflen);
62295+
62296+ if (IS_ERR(pathname))
62297+ goto toolong;
62298+
62299+ pathname = mangle_path(buf, pathname, "\t\n\\");
62300+ if (!pathname)
62301+ goto toolong;
62302+
62303+ *pathname = 0;
62304+ return buf;
62305+
62306+toolong:
62307+ return "<path too long>";
62308+}
62309+EXPORT_SYMBOL(pax_get_path);
62310+
62311+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62312+{
62313+ struct task_struct *tsk = current;
62314+ struct mm_struct *mm = current->mm;
62315+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62316+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62317+ char *path_exec = NULL;
62318+ char *path_fault = NULL;
62319+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62320+ siginfo_t info = { };
62321+
62322+ if (buffer_exec && buffer_fault) {
62323+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62324+
62325+ down_read(&mm->mmap_sem);
62326+ vma = mm->mmap;
62327+ while (vma && (!vma_exec || !vma_fault)) {
62328+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62329+ vma_exec = vma;
62330+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62331+ vma_fault = vma;
62332+ vma = vma->vm_next;
62333+ }
62334+ if (vma_exec)
62335+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62336+ if (vma_fault) {
62337+ start = vma_fault->vm_start;
62338+ end = vma_fault->vm_end;
62339+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62340+ if (vma_fault->vm_file)
62341+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62342+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62343+ path_fault = "<heap>";
62344+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62345+ path_fault = "<stack>";
62346+ else
62347+ path_fault = "<anonymous mapping>";
62348+ }
62349+ up_read(&mm->mmap_sem);
62350+ }
62351+ if (tsk->signal->curr_ip)
62352+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62353+ else
62354+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62355+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62356+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62357+ free_page((unsigned long)buffer_exec);
62358+ free_page((unsigned long)buffer_fault);
62359+ pax_report_insns(regs, pc, sp);
62360+ info.si_signo = SIGKILL;
62361+ info.si_errno = 0;
62362+ info.si_code = SI_KERNEL;
62363+ info.si_pid = 0;
62364+ info.si_uid = 0;
62365+ do_coredump(&info);
62366+}
62367+#endif
62368+
62369+#ifdef CONFIG_PAX_REFCOUNT
62370+void pax_report_refcount_overflow(struct pt_regs *regs)
62371+{
62372+ if (current->signal->curr_ip)
62373+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62374+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62375+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62376+ else
62377+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62378+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62379+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
62380+ preempt_disable();
62381+ show_regs(regs);
62382+ preempt_enable();
62383+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62384+}
62385+#endif
62386+
62387+#ifdef CONFIG_PAX_USERCOPY
62388+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62389+static noinline int check_stack_object(const void *obj, unsigned long len)
62390+{
62391+ const void * const stack = task_stack_page(current);
62392+ const void * const stackend = stack + THREAD_SIZE;
62393+
62394+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62395+ const void *frame = NULL;
62396+ const void *oldframe;
62397+#endif
62398+
62399+ if (obj + len < obj)
62400+ return -1;
62401+
62402+ if (obj + len <= stack || stackend <= obj)
62403+ return 0;
62404+
62405+ if (obj < stack || stackend < obj + len)
62406+ return -1;
62407+
62408+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62409+ oldframe = __builtin_frame_address(1);
62410+ if (oldframe)
62411+ frame = __builtin_frame_address(2);
62412+ /*
62413+ low ----------------------------------------------> high
62414+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62415+ ^----------------^
62416+ allow copies only within here
62417+ */
62418+ while (stack <= frame && frame < stackend) {
62419+ /* if obj + len extends past the last frame, this
62420+ check won't pass and the next frame will be 0,
62421+ causing us to bail out and correctly report
62422+ the copy as invalid
62423+ */
62424+ if (obj + len <= frame)
62425+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62426+ oldframe = frame;
62427+ frame = *(const void * const *)frame;
62428+ }
62429+ return -1;
62430+#else
62431+ return 1;
62432+#endif
62433+}
62434+
62435+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62436+{
62437+ if (current->signal->curr_ip)
62438+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62439+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62440+ else
62441+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62442+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62443+ dump_stack();
62444+ gr_handle_kernel_exploit();
62445+ do_group_exit(SIGKILL);
62446+}
62447+#endif
62448+
62449+#ifdef CONFIG_PAX_USERCOPY
62450+
62451+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62452+{
62453+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62454+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62455+#ifdef CONFIG_MODULES
62456+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62457+#else
62458+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62459+#endif
62460+
62461+#else
62462+ unsigned long textlow = (unsigned long)_stext;
62463+ unsigned long texthigh = (unsigned long)_etext;
62464+
62465+#ifdef CONFIG_X86_64
62466+ /* check against linear mapping as well */
62467+ if (high > (unsigned long)__va(__pa(textlow)) &&
62468+ low < (unsigned long)__va(__pa(texthigh)))
62469+ return true;
62470+#endif
62471+
62472+#endif
62473+
62474+ if (high <= textlow || low >= texthigh)
62475+ return false;
62476+ else
62477+ return true;
62478+}
62479+#endif
62480+
62481+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62482+{
62483+#ifdef CONFIG_PAX_USERCOPY
62484+ const char *type;
62485+#endif
62486+
62487+#ifndef CONFIG_STACK_GROWSUP
62488+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62489+ unsigned long currentsp = (unsigned long)&stackstart;
62490+ if (unlikely((currentsp < stackstart + 512 ||
62491+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62492+ BUG();
62493+#endif
62494+
62495+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62496+ if (const_size)
62497+ return;
62498+#endif
62499+
62500+#ifdef CONFIG_PAX_USERCOPY
62501+ if (!n)
62502+ return;
62503+
62504+ type = check_heap_object(ptr, n);
62505+ if (!type) {
62506+ int ret = check_stack_object(ptr, n);
62507+ if (ret == 1 || ret == 2)
62508+ return;
62509+ if (ret == 0) {
62510+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62511+ type = "<kernel text>";
62512+ else
62513+ return;
62514+ } else
62515+ type = "<process stack>";
62516+ }
62517+
62518+ pax_report_usercopy(ptr, n, to_user, type);
62519+#endif
62520+
62521+}
62522+EXPORT_SYMBOL(__check_object_size);
62523+
62524+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62525+void pax_track_stack(void)
62526+{
62527+ unsigned long sp = (unsigned long)&sp;
62528+ if (sp < current_thread_info()->lowest_stack &&
62529+ sp > (unsigned long)task_stack_page(current))
62530+ current_thread_info()->lowest_stack = sp;
62531+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62532+ BUG();
62533+}
62534+EXPORT_SYMBOL(pax_track_stack);
62535+#endif
62536+
62537+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62538+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62539+{
62540+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62541+ dump_stack();
62542+ do_group_exit(SIGKILL);
62543+}
62544+EXPORT_SYMBOL(report_size_overflow);
62545+#endif
62546diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62547index 9f9992b..8b59411 100644
62548--- a/fs/ext2/balloc.c
62549+++ b/fs/ext2/balloc.c
62550@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62551
62552 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62553 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62554- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62555+ if (free_blocks < root_blocks + 1 &&
62556 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62557 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62558- !in_group_p (sbi->s_resgid))) {
62559+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62560 return 0;
62561 }
62562 return 1;
62563diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62564index 9142614..97484fa 100644
62565--- a/fs/ext2/xattr.c
62566+++ b/fs/ext2/xattr.c
62567@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62568 struct buffer_head *bh = NULL;
62569 struct ext2_xattr_entry *entry;
62570 char *end;
62571- size_t rest = buffer_size;
62572+ size_t rest = buffer_size, total_size = 0;
62573 int error;
62574
62575 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62576@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62577 buffer += size;
62578 }
62579 rest -= size;
62580+ total_size += size;
62581 }
62582 }
62583- error = buffer_size - rest; /* total size */
62584+ error = total_size;
62585
62586 cleanup:
62587 brelse(bh);
62588diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
62589index 158b5d4..2432610 100644
62590--- a/fs/ext3/balloc.c
62591+++ b/fs/ext3/balloc.c
62592@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
62593
62594 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62595 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62596- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62597+ if (free_blocks < root_blocks + 1 &&
62598 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
62599 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62600- !in_group_p (sbi->s_resgid))) {
62601+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62602 return 0;
62603 }
62604 return 1;
62605diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
62606index c6874be..f8a6ae8 100644
62607--- a/fs/ext3/xattr.c
62608+++ b/fs/ext3/xattr.c
62609@@ -330,7 +330,7 @@ static int
62610 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62611 char *buffer, size_t buffer_size)
62612 {
62613- size_t rest = buffer_size;
62614+ size_t rest = buffer_size, total_size = 0;
62615
62616 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
62617 const struct xattr_handler *handler =
62618@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62619 buffer += size;
62620 }
62621 rest -= size;
62622+ total_size += size;
62623 }
62624 }
62625- return buffer_size - rest;
62626+ return total_size;
62627 }
62628
62629 static int
62630diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
62631index 581ef40..cec52d7 100644
62632--- a/fs/ext4/balloc.c
62633+++ b/fs/ext4/balloc.c
62634@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
62635 /* Hm, nope. Are (enough) root reserved clusters available? */
62636 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
62637 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
62638- capable(CAP_SYS_RESOURCE) ||
62639- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
62640+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
62641+ capable_nolog(CAP_SYS_RESOURCE)) {
62642
62643 if (free_clusters >= (nclusters + dirty_clusters +
62644 resv_clusters))
62645diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
62646index b0c225c..0e69bd7 100644
62647--- a/fs/ext4/ext4.h
62648+++ b/fs/ext4/ext4.h
62649@@ -1275,19 +1275,19 @@ struct ext4_sb_info {
62650 unsigned long s_mb_last_start;
62651
62652 /* stats for buddy allocator */
62653- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
62654- atomic_t s_bal_success; /* we found long enough chunks */
62655- atomic_t s_bal_allocated; /* in blocks */
62656- atomic_t s_bal_ex_scanned; /* total extents scanned */
62657- atomic_t s_bal_goals; /* goal hits */
62658- atomic_t s_bal_breaks; /* too long searches */
62659- atomic_t s_bal_2orders; /* 2^order hits */
62660+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
62661+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
62662+ atomic_unchecked_t s_bal_allocated; /* in blocks */
62663+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
62664+ atomic_unchecked_t s_bal_goals; /* goal hits */
62665+ atomic_unchecked_t s_bal_breaks; /* too long searches */
62666+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
62667 spinlock_t s_bal_lock;
62668 unsigned long s_mb_buddies_generated;
62669 unsigned long long s_mb_generation_time;
62670- atomic_t s_mb_lost_chunks;
62671- atomic_t s_mb_preallocated;
62672- atomic_t s_mb_discarded;
62673+ atomic_unchecked_t s_mb_lost_chunks;
62674+ atomic_unchecked_t s_mb_preallocated;
62675+ atomic_unchecked_t s_mb_discarded;
62676 atomic_t s_lock_busy;
62677
62678 /* locality groups */
62679diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
62680index 8b0f9ef..cb9f620 100644
62681--- a/fs/ext4/mballoc.c
62682+++ b/fs/ext4/mballoc.c
62683@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62684 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62685
62686 if (EXT4_SB(sb)->s_mb_stats)
62687- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62688+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62689
62690 break;
62691 }
62692@@ -2211,7 +2211,7 @@ repeat:
62693 ac->ac_status = AC_STATUS_CONTINUE;
62694 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62695 cr = 3;
62696- atomic_inc(&sbi->s_mb_lost_chunks);
62697+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62698 goto repeat;
62699 }
62700 }
62701@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
62702 if (sbi->s_mb_stats) {
62703 ext4_msg(sb, KERN_INFO,
62704 "mballoc: %u blocks %u reqs (%u success)",
62705- atomic_read(&sbi->s_bal_allocated),
62706- atomic_read(&sbi->s_bal_reqs),
62707- atomic_read(&sbi->s_bal_success));
62708+ atomic_read_unchecked(&sbi->s_bal_allocated),
62709+ atomic_read_unchecked(&sbi->s_bal_reqs),
62710+ atomic_read_unchecked(&sbi->s_bal_success));
62711 ext4_msg(sb, KERN_INFO,
62712 "mballoc: %u extents scanned, %u goal hits, "
62713 "%u 2^N hits, %u breaks, %u lost",
62714- atomic_read(&sbi->s_bal_ex_scanned),
62715- atomic_read(&sbi->s_bal_goals),
62716- atomic_read(&sbi->s_bal_2orders),
62717- atomic_read(&sbi->s_bal_breaks),
62718- atomic_read(&sbi->s_mb_lost_chunks));
62719+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62720+ atomic_read_unchecked(&sbi->s_bal_goals),
62721+ atomic_read_unchecked(&sbi->s_bal_2orders),
62722+ atomic_read_unchecked(&sbi->s_bal_breaks),
62723+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62724 ext4_msg(sb, KERN_INFO,
62725 "mballoc: %lu generated and it took %Lu",
62726 sbi->s_mb_buddies_generated,
62727 sbi->s_mb_generation_time);
62728 ext4_msg(sb, KERN_INFO,
62729 "mballoc: %u preallocated, %u discarded",
62730- atomic_read(&sbi->s_mb_preallocated),
62731- atomic_read(&sbi->s_mb_discarded));
62732+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62733+ atomic_read_unchecked(&sbi->s_mb_discarded));
62734 }
62735
62736 free_percpu(sbi->s_locality_groups);
62737@@ -3192,16 +3192,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62738 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62739
62740 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62741- atomic_inc(&sbi->s_bal_reqs);
62742- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62743+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62744+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62745 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62746- atomic_inc(&sbi->s_bal_success);
62747- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62748+ atomic_inc_unchecked(&sbi->s_bal_success);
62749+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62750 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62751 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62752- atomic_inc(&sbi->s_bal_goals);
62753+ atomic_inc_unchecked(&sbi->s_bal_goals);
62754 if (ac->ac_found > sbi->s_mb_max_to_scan)
62755- atomic_inc(&sbi->s_bal_breaks);
62756+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62757 }
62758
62759 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62760@@ -3628,7 +3628,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62761 trace_ext4_mb_new_inode_pa(ac, pa);
62762
62763 ext4_mb_use_inode_pa(ac, pa);
62764- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62765+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62766
62767 ei = EXT4_I(ac->ac_inode);
62768 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62769@@ -3688,7 +3688,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62770 trace_ext4_mb_new_group_pa(ac, pa);
62771
62772 ext4_mb_use_group_pa(ac, pa);
62773- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62774+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62775
62776 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62777 lg = ac->ac_lg;
62778@@ -3777,7 +3777,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62779 * from the bitmap and continue.
62780 */
62781 }
62782- atomic_add(free, &sbi->s_mb_discarded);
62783+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62784
62785 return err;
62786 }
62787@@ -3795,7 +3795,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62788 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62789 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62790 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62791- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62792+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62793 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62794
62795 return 0;
62796diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62797index 32bce84..112d969 100644
62798--- a/fs/ext4/mmp.c
62799+++ b/fs/ext4/mmp.c
62800@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62801 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62802 const char *function, unsigned int line, const char *msg)
62803 {
62804- __ext4_warning(sb, function, line, msg);
62805+ __ext4_warning(sb, function, line, "%s", msg);
62806 __ext4_warning(sb, function, line,
62807 "MMP failure info: last update time: %llu, last update "
62808 "node: %s, last update device: %s\n",
62809diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62810index 0b28b36..b85d0f53 100644
62811--- a/fs/ext4/super.c
62812+++ b/fs/ext4/super.c
62813@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62814 }
62815
62816 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62817-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62818+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62819 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62820
62821 #ifdef CONFIG_QUOTA
62822@@ -2460,7 +2460,7 @@ struct ext4_attr {
62823 int offset;
62824 int deprecated_val;
62825 } u;
62826-};
62827+} __do_const;
62828
62829 static int parse_strtoull(const char *buf,
62830 unsigned long long max, unsigned long long *value)
62831diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62832index e738733..9843a6c 100644
62833--- a/fs/ext4/xattr.c
62834+++ b/fs/ext4/xattr.c
62835@@ -386,7 +386,7 @@ static int
62836 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62837 char *buffer, size_t buffer_size)
62838 {
62839- size_t rest = buffer_size;
62840+ size_t rest = buffer_size, total_size = 0;
62841
62842 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62843 const struct xattr_handler *handler =
62844@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62845 buffer += size;
62846 }
62847 rest -= size;
62848+ total_size += size;
62849 }
62850 }
62851- return buffer_size - rest;
62852+ return total_size;
62853 }
62854
62855 static int
62856diff --git a/fs/fcntl.c b/fs/fcntl.c
62857index 22d1c3d..600cf7e 100644
62858--- a/fs/fcntl.c
62859+++ b/fs/fcntl.c
62860@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62861 if (err)
62862 return err;
62863
62864+ if (gr_handle_chroot_fowner(pid, type))
62865+ return -ENOENT;
62866+ if (gr_check_protected_task_fowner(pid, type))
62867+ return -EACCES;
62868+
62869 f_modown(filp, pid, type, force);
62870 return 0;
62871 }
62872diff --git a/fs/fhandle.c b/fs/fhandle.c
62873index 999ff5c..ac037c9 100644
62874--- a/fs/fhandle.c
62875+++ b/fs/fhandle.c
62876@@ -8,6 +8,7 @@
62877 #include <linux/fs_struct.h>
62878 #include <linux/fsnotify.h>
62879 #include <linux/personality.h>
62880+#include <linux/grsecurity.h>
62881 #include <asm/uaccess.h>
62882 #include "internal.h"
62883 #include "mount.h"
62884@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62885 } else
62886 retval = 0;
62887 /* copy the mount id */
62888- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62889- sizeof(*mnt_id)) ||
62890+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62891 copy_to_user(ufh, handle,
62892 sizeof(struct file_handle) + handle_bytes))
62893 retval = -EFAULT;
62894@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62895 * the directory. Ideally we would like CAP_DAC_SEARCH.
62896 * But we don't have that
62897 */
62898- if (!capable(CAP_DAC_READ_SEARCH)) {
62899+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62900 retval = -EPERM;
62901 goto out_err;
62902 }
62903diff --git a/fs/file.c b/fs/file.c
62904index 66923fe..2849783 100644
62905--- a/fs/file.c
62906+++ b/fs/file.c
62907@@ -16,6 +16,7 @@
62908 #include <linux/slab.h>
62909 #include <linux/vmalloc.h>
62910 #include <linux/file.h>
62911+#include <linux/security.h>
62912 #include <linux/fdtable.h>
62913 #include <linux/bitops.h>
62914 #include <linux/interrupt.h>
62915@@ -139,7 +140,7 @@ out:
62916 * Return <0 error code on error; 1 on successful completion.
62917 * The files->file_lock should be held on entry, and will be held on exit.
62918 */
62919-static int expand_fdtable(struct files_struct *files, int nr)
62920+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62921 __releases(files->file_lock)
62922 __acquires(files->file_lock)
62923 {
62924@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62925 * expanded and execution may have blocked.
62926 * The files->file_lock should be held on entry, and will be held on exit.
62927 */
62928-static int expand_files(struct files_struct *files, int nr)
62929+static int expand_files(struct files_struct *files, unsigned int nr)
62930 {
62931 struct fdtable *fdt;
62932
62933@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62934 if (!file)
62935 return __close_fd(files, fd);
62936
62937+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62938 if (fd >= rlimit(RLIMIT_NOFILE))
62939 return -EBADF;
62940
62941@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62942 if (unlikely(oldfd == newfd))
62943 return -EINVAL;
62944
62945+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62946 if (newfd >= rlimit(RLIMIT_NOFILE))
62947 return -EBADF;
62948
62949@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62950 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62951 {
62952 int err;
62953+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62954 if (from >= rlimit(RLIMIT_NOFILE))
62955 return -EINVAL;
62956 err = alloc_fd(from, flags);
62957diff --git a/fs/filesystems.c b/fs/filesystems.c
62958index 5797d45..7d7d79a 100644
62959--- a/fs/filesystems.c
62960+++ b/fs/filesystems.c
62961@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62962 int len = dot ? dot - name : strlen(name);
62963
62964 fs = __get_fs_type(name, len);
62965+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62966+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62967+#else
62968 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62969+#endif
62970 fs = __get_fs_type(name, len);
62971
62972 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62973diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62974index 7dca743..543d620 100644
62975--- a/fs/fs_struct.c
62976+++ b/fs/fs_struct.c
62977@@ -4,6 +4,7 @@
62978 #include <linux/path.h>
62979 #include <linux/slab.h>
62980 #include <linux/fs_struct.h>
62981+#include <linux/grsecurity.h>
62982 #include "internal.h"
62983
62984 /*
62985@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62986 write_seqcount_begin(&fs->seq);
62987 old_root = fs->root;
62988 fs->root = *path;
62989+ gr_set_chroot_entries(current, path);
62990 write_seqcount_end(&fs->seq);
62991 spin_unlock(&fs->lock);
62992 if (old_root.dentry)
62993@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62994 int hits = 0;
62995 spin_lock(&fs->lock);
62996 write_seqcount_begin(&fs->seq);
62997+ /* this root replacement is only done by pivot_root,
62998+ leave grsec's chroot tagging alone for this task
62999+ so that a pivoted root isn't treated as a chroot
63000+ */
63001 hits += replace_path(&fs->root, old_root, new_root);
63002 hits += replace_path(&fs->pwd, old_root, new_root);
63003 write_seqcount_end(&fs->seq);
63004@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
63005 task_lock(tsk);
63006 spin_lock(&fs->lock);
63007 tsk->fs = NULL;
63008- kill = !--fs->users;
63009+ gr_clear_chroot_entries(tsk);
63010+ kill = !atomic_dec_return(&fs->users);
63011 spin_unlock(&fs->lock);
63012 task_unlock(tsk);
63013 if (kill)
63014@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63015 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63016 /* We don't need to lock fs - think why ;-) */
63017 if (fs) {
63018- fs->users = 1;
63019+ atomic_set(&fs->users, 1);
63020 fs->in_exec = 0;
63021 spin_lock_init(&fs->lock);
63022 seqcount_init(&fs->seq);
63023@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63024 spin_lock(&old->lock);
63025 fs->root = old->root;
63026 path_get(&fs->root);
63027+ /* instead of calling gr_set_chroot_entries here,
63028+ we call it from every caller of this function
63029+ */
63030 fs->pwd = old->pwd;
63031 path_get(&fs->pwd);
63032 spin_unlock(&old->lock);
63033@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63034
63035 task_lock(current);
63036 spin_lock(&fs->lock);
63037- kill = !--fs->users;
63038+ kill = !atomic_dec_return(&fs->users);
63039 current->fs = new_fs;
63040+ gr_set_chroot_entries(current, &new_fs->root);
63041 spin_unlock(&fs->lock);
63042 task_unlock(current);
63043
63044@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63045
63046 int current_umask(void)
63047 {
63048- return current->fs->umask;
63049+ return current->fs->umask | gr_acl_umask();
63050 }
63051 EXPORT_SYMBOL(current_umask);
63052
63053 /* to be mentioned only in INIT_TASK */
63054 struct fs_struct init_fs = {
63055- .users = 1,
63056+ .users = ATOMIC_INIT(1),
63057 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63058 .seq = SEQCNT_ZERO(init_fs.seq),
63059 .umask = 0022,
63060diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63061index 89acec7..a575262 100644
63062--- a/fs/fscache/cookie.c
63063+++ b/fs/fscache/cookie.c
63064@@ -19,7 +19,7 @@
63065
63066 struct kmem_cache *fscache_cookie_jar;
63067
63068-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63069+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63070
63071 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63072 static int fscache_alloc_object(struct fscache_cache *cache,
63073@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63074 parent ? (char *) parent->def->name : "<no-parent>",
63075 def->name, netfs_data, enable);
63076
63077- fscache_stat(&fscache_n_acquires);
63078+ fscache_stat_unchecked(&fscache_n_acquires);
63079
63080 /* if there's no parent cookie, then we don't create one here either */
63081 if (!parent) {
63082- fscache_stat(&fscache_n_acquires_null);
63083+ fscache_stat_unchecked(&fscache_n_acquires_null);
63084 _leave(" [no parent]");
63085 return NULL;
63086 }
63087@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63088 /* allocate and initialise a cookie */
63089 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63090 if (!cookie) {
63091- fscache_stat(&fscache_n_acquires_oom);
63092+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63093 _leave(" [ENOMEM]");
63094 return NULL;
63095 }
63096@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63097
63098 switch (cookie->def->type) {
63099 case FSCACHE_COOKIE_TYPE_INDEX:
63100- fscache_stat(&fscache_n_cookie_index);
63101+ fscache_stat_unchecked(&fscache_n_cookie_index);
63102 break;
63103 case FSCACHE_COOKIE_TYPE_DATAFILE:
63104- fscache_stat(&fscache_n_cookie_data);
63105+ fscache_stat_unchecked(&fscache_n_cookie_data);
63106 break;
63107 default:
63108- fscache_stat(&fscache_n_cookie_special);
63109+ fscache_stat_unchecked(&fscache_n_cookie_special);
63110 break;
63111 }
63112
63113@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63114 } else {
63115 atomic_dec(&parent->n_children);
63116 __fscache_cookie_put(cookie);
63117- fscache_stat(&fscache_n_acquires_nobufs);
63118+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63119 _leave(" = NULL");
63120 return NULL;
63121 }
63122@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63123 }
63124 }
63125
63126- fscache_stat(&fscache_n_acquires_ok);
63127+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63128 _leave(" = %p", cookie);
63129 return cookie;
63130 }
63131@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63132 cache = fscache_select_cache_for_object(cookie->parent);
63133 if (!cache) {
63134 up_read(&fscache_addremove_sem);
63135- fscache_stat(&fscache_n_acquires_no_cache);
63136+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63137 _leave(" = -ENOMEDIUM [no cache]");
63138 return -ENOMEDIUM;
63139 }
63140@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63141 object = cache->ops->alloc_object(cache, cookie);
63142 fscache_stat_d(&fscache_n_cop_alloc_object);
63143 if (IS_ERR(object)) {
63144- fscache_stat(&fscache_n_object_no_alloc);
63145+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63146 ret = PTR_ERR(object);
63147 goto error;
63148 }
63149
63150- fscache_stat(&fscache_n_object_alloc);
63151+ fscache_stat_unchecked(&fscache_n_object_alloc);
63152
63153- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63154+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63155
63156 _debug("ALLOC OBJ%x: %s {%lx}",
63157 object->debug_id, cookie->def->name, object->events);
63158@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63159
63160 _enter("{%s}", cookie->def->name);
63161
63162- fscache_stat(&fscache_n_invalidates);
63163+ fscache_stat_unchecked(&fscache_n_invalidates);
63164
63165 /* Only permit invalidation of data files. Invalidating an index will
63166 * require the caller to release all its attachments to the tree rooted
63167@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63168 {
63169 struct fscache_object *object;
63170
63171- fscache_stat(&fscache_n_updates);
63172+ fscache_stat_unchecked(&fscache_n_updates);
63173
63174 if (!cookie) {
63175- fscache_stat(&fscache_n_updates_null);
63176+ fscache_stat_unchecked(&fscache_n_updates_null);
63177 _leave(" [no cookie]");
63178 return;
63179 }
63180@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63181 */
63182 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63183 {
63184- fscache_stat(&fscache_n_relinquishes);
63185+ fscache_stat_unchecked(&fscache_n_relinquishes);
63186 if (retire)
63187- fscache_stat(&fscache_n_relinquishes_retire);
63188+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63189
63190 if (!cookie) {
63191- fscache_stat(&fscache_n_relinquishes_null);
63192+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63193 _leave(" [no cookie]");
63194 return;
63195 }
63196@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63197 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63198 goto inconsistent;
63199
63200- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63201+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63202
63203 __fscache_use_cookie(cookie);
63204 if (fscache_submit_op(object, op) < 0)
63205diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63206index 7872a62..d91b19f 100644
63207--- a/fs/fscache/internal.h
63208+++ b/fs/fscache/internal.h
63209@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
63210 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63211 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63212 struct fscache_operation *,
63213- atomic_t *,
63214- atomic_t *,
63215+ atomic_unchecked_t *,
63216+ atomic_unchecked_t *,
63217 void (*)(struct fscache_operation *));
63218 extern void fscache_invalidate_writes(struct fscache_cookie *);
63219
63220@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
63221 * stats.c
63222 */
63223 #ifdef CONFIG_FSCACHE_STATS
63224-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63225-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63226+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63227+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63228
63229-extern atomic_t fscache_n_op_pend;
63230-extern atomic_t fscache_n_op_run;
63231-extern atomic_t fscache_n_op_enqueue;
63232-extern atomic_t fscache_n_op_deferred_release;
63233-extern atomic_t fscache_n_op_release;
63234-extern atomic_t fscache_n_op_gc;
63235-extern atomic_t fscache_n_op_cancelled;
63236-extern atomic_t fscache_n_op_rejected;
63237+extern atomic_unchecked_t fscache_n_op_pend;
63238+extern atomic_unchecked_t fscache_n_op_run;
63239+extern atomic_unchecked_t fscache_n_op_enqueue;
63240+extern atomic_unchecked_t fscache_n_op_deferred_release;
63241+extern atomic_unchecked_t fscache_n_op_release;
63242+extern atomic_unchecked_t fscache_n_op_gc;
63243+extern atomic_unchecked_t fscache_n_op_cancelled;
63244+extern atomic_unchecked_t fscache_n_op_rejected;
63245
63246-extern atomic_t fscache_n_attr_changed;
63247-extern atomic_t fscache_n_attr_changed_ok;
63248-extern atomic_t fscache_n_attr_changed_nobufs;
63249-extern atomic_t fscache_n_attr_changed_nomem;
63250-extern atomic_t fscache_n_attr_changed_calls;
63251+extern atomic_unchecked_t fscache_n_attr_changed;
63252+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63253+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63254+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63255+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63256
63257-extern atomic_t fscache_n_allocs;
63258-extern atomic_t fscache_n_allocs_ok;
63259-extern atomic_t fscache_n_allocs_wait;
63260-extern atomic_t fscache_n_allocs_nobufs;
63261-extern atomic_t fscache_n_allocs_intr;
63262-extern atomic_t fscache_n_allocs_object_dead;
63263-extern atomic_t fscache_n_alloc_ops;
63264-extern atomic_t fscache_n_alloc_op_waits;
63265+extern atomic_unchecked_t fscache_n_allocs;
63266+extern atomic_unchecked_t fscache_n_allocs_ok;
63267+extern atomic_unchecked_t fscache_n_allocs_wait;
63268+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63269+extern atomic_unchecked_t fscache_n_allocs_intr;
63270+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63271+extern atomic_unchecked_t fscache_n_alloc_ops;
63272+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63273
63274-extern atomic_t fscache_n_retrievals;
63275-extern atomic_t fscache_n_retrievals_ok;
63276-extern atomic_t fscache_n_retrievals_wait;
63277-extern atomic_t fscache_n_retrievals_nodata;
63278-extern atomic_t fscache_n_retrievals_nobufs;
63279-extern atomic_t fscache_n_retrievals_intr;
63280-extern atomic_t fscache_n_retrievals_nomem;
63281-extern atomic_t fscache_n_retrievals_object_dead;
63282-extern atomic_t fscache_n_retrieval_ops;
63283-extern atomic_t fscache_n_retrieval_op_waits;
63284+extern atomic_unchecked_t fscache_n_retrievals;
63285+extern atomic_unchecked_t fscache_n_retrievals_ok;
63286+extern atomic_unchecked_t fscache_n_retrievals_wait;
63287+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63288+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63289+extern atomic_unchecked_t fscache_n_retrievals_intr;
63290+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63291+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63292+extern atomic_unchecked_t fscache_n_retrieval_ops;
63293+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63294
63295-extern atomic_t fscache_n_stores;
63296-extern atomic_t fscache_n_stores_ok;
63297-extern atomic_t fscache_n_stores_again;
63298-extern atomic_t fscache_n_stores_nobufs;
63299-extern atomic_t fscache_n_stores_oom;
63300-extern atomic_t fscache_n_store_ops;
63301-extern atomic_t fscache_n_store_calls;
63302-extern atomic_t fscache_n_store_pages;
63303-extern atomic_t fscache_n_store_radix_deletes;
63304-extern atomic_t fscache_n_store_pages_over_limit;
63305+extern atomic_unchecked_t fscache_n_stores;
63306+extern atomic_unchecked_t fscache_n_stores_ok;
63307+extern atomic_unchecked_t fscache_n_stores_again;
63308+extern atomic_unchecked_t fscache_n_stores_nobufs;
63309+extern atomic_unchecked_t fscache_n_stores_oom;
63310+extern atomic_unchecked_t fscache_n_store_ops;
63311+extern atomic_unchecked_t fscache_n_store_calls;
63312+extern atomic_unchecked_t fscache_n_store_pages;
63313+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63314+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63315
63316-extern atomic_t fscache_n_store_vmscan_not_storing;
63317-extern atomic_t fscache_n_store_vmscan_gone;
63318-extern atomic_t fscache_n_store_vmscan_busy;
63319-extern atomic_t fscache_n_store_vmscan_cancelled;
63320-extern atomic_t fscache_n_store_vmscan_wait;
63321+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63322+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63323+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63324+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63325+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63326
63327-extern atomic_t fscache_n_marks;
63328-extern atomic_t fscache_n_uncaches;
63329+extern atomic_unchecked_t fscache_n_marks;
63330+extern atomic_unchecked_t fscache_n_uncaches;
63331
63332-extern atomic_t fscache_n_acquires;
63333-extern atomic_t fscache_n_acquires_null;
63334-extern atomic_t fscache_n_acquires_no_cache;
63335-extern atomic_t fscache_n_acquires_ok;
63336-extern atomic_t fscache_n_acquires_nobufs;
63337-extern atomic_t fscache_n_acquires_oom;
63338+extern atomic_unchecked_t fscache_n_acquires;
63339+extern atomic_unchecked_t fscache_n_acquires_null;
63340+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63341+extern atomic_unchecked_t fscache_n_acquires_ok;
63342+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63343+extern atomic_unchecked_t fscache_n_acquires_oom;
63344
63345-extern atomic_t fscache_n_invalidates;
63346-extern atomic_t fscache_n_invalidates_run;
63347+extern atomic_unchecked_t fscache_n_invalidates;
63348+extern atomic_unchecked_t fscache_n_invalidates_run;
63349
63350-extern atomic_t fscache_n_updates;
63351-extern atomic_t fscache_n_updates_null;
63352-extern atomic_t fscache_n_updates_run;
63353+extern atomic_unchecked_t fscache_n_updates;
63354+extern atomic_unchecked_t fscache_n_updates_null;
63355+extern atomic_unchecked_t fscache_n_updates_run;
63356
63357-extern atomic_t fscache_n_relinquishes;
63358-extern atomic_t fscache_n_relinquishes_null;
63359-extern atomic_t fscache_n_relinquishes_waitcrt;
63360-extern atomic_t fscache_n_relinquishes_retire;
63361+extern atomic_unchecked_t fscache_n_relinquishes;
63362+extern atomic_unchecked_t fscache_n_relinquishes_null;
63363+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63364+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63365
63366-extern atomic_t fscache_n_cookie_index;
63367-extern atomic_t fscache_n_cookie_data;
63368-extern atomic_t fscache_n_cookie_special;
63369+extern atomic_unchecked_t fscache_n_cookie_index;
63370+extern atomic_unchecked_t fscache_n_cookie_data;
63371+extern atomic_unchecked_t fscache_n_cookie_special;
63372
63373-extern atomic_t fscache_n_object_alloc;
63374-extern atomic_t fscache_n_object_no_alloc;
63375-extern atomic_t fscache_n_object_lookups;
63376-extern atomic_t fscache_n_object_lookups_negative;
63377-extern atomic_t fscache_n_object_lookups_positive;
63378-extern atomic_t fscache_n_object_lookups_timed_out;
63379-extern atomic_t fscache_n_object_created;
63380-extern atomic_t fscache_n_object_avail;
63381-extern atomic_t fscache_n_object_dead;
63382+extern atomic_unchecked_t fscache_n_object_alloc;
63383+extern atomic_unchecked_t fscache_n_object_no_alloc;
63384+extern atomic_unchecked_t fscache_n_object_lookups;
63385+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63386+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63387+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63388+extern atomic_unchecked_t fscache_n_object_created;
63389+extern atomic_unchecked_t fscache_n_object_avail;
63390+extern atomic_unchecked_t fscache_n_object_dead;
63391
63392-extern atomic_t fscache_n_checkaux_none;
63393-extern atomic_t fscache_n_checkaux_okay;
63394-extern atomic_t fscache_n_checkaux_update;
63395-extern atomic_t fscache_n_checkaux_obsolete;
63396+extern atomic_unchecked_t fscache_n_checkaux_none;
63397+extern atomic_unchecked_t fscache_n_checkaux_okay;
63398+extern atomic_unchecked_t fscache_n_checkaux_update;
63399+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63400
63401 extern atomic_t fscache_n_cop_alloc_object;
63402 extern atomic_t fscache_n_cop_lookup_object;
63403@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
63404 atomic_inc(stat);
63405 }
63406
63407+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63408+{
63409+ atomic_inc_unchecked(stat);
63410+}
63411+
63412 static inline void fscache_stat_d(atomic_t *stat)
63413 {
63414 atomic_dec(stat);
63415@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
63416
63417 #define __fscache_stat(stat) (NULL)
63418 #define fscache_stat(stat) do {} while (0)
63419+#define fscache_stat_unchecked(stat) do {} while (0)
63420 #define fscache_stat_d(stat) do {} while (0)
63421 #endif
63422
63423diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63424index da032da..0076ce7 100644
63425--- a/fs/fscache/object.c
63426+++ b/fs/fscache/object.c
63427@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63428 _debug("LOOKUP \"%s\" in \"%s\"",
63429 cookie->def->name, object->cache->tag->name);
63430
63431- fscache_stat(&fscache_n_object_lookups);
63432+ fscache_stat_unchecked(&fscache_n_object_lookups);
63433 fscache_stat(&fscache_n_cop_lookup_object);
63434 ret = object->cache->ops->lookup_object(object);
63435 fscache_stat_d(&fscache_n_cop_lookup_object);
63436@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63437 if (ret == -ETIMEDOUT) {
63438 /* probably stuck behind another object, so move this one to
63439 * the back of the queue */
63440- fscache_stat(&fscache_n_object_lookups_timed_out);
63441+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63442 _leave(" [timeout]");
63443 return NO_TRANSIT;
63444 }
63445@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63446 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63447
63448 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63449- fscache_stat(&fscache_n_object_lookups_negative);
63450+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63451
63452 /* Allow write requests to begin stacking up and read requests to begin
63453 * returning ENODATA.
63454@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63455 /* if we were still looking up, then we must have a positive lookup
63456 * result, in which case there may be data available */
63457 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63458- fscache_stat(&fscache_n_object_lookups_positive);
63459+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63460
63461 /* We do (presumably) have data */
63462 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63463@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63464 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63465 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63466 } else {
63467- fscache_stat(&fscache_n_object_created);
63468+ fscache_stat_unchecked(&fscache_n_object_created);
63469 }
63470
63471 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63472@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63473 fscache_stat_d(&fscache_n_cop_lookup_complete);
63474
63475 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63476- fscache_stat(&fscache_n_object_avail);
63477+ fscache_stat_unchecked(&fscache_n_object_avail);
63478
63479 _leave("");
63480 return transit_to(JUMPSTART_DEPS);
63481@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63482
63483 /* this just shifts the object release to the work processor */
63484 fscache_put_object(object);
63485- fscache_stat(&fscache_n_object_dead);
63486+ fscache_stat_unchecked(&fscache_n_object_dead);
63487
63488 _leave("");
63489 return transit_to(OBJECT_DEAD);
63490@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63491 enum fscache_checkaux result;
63492
63493 if (!object->cookie->def->check_aux) {
63494- fscache_stat(&fscache_n_checkaux_none);
63495+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63496 return FSCACHE_CHECKAUX_OKAY;
63497 }
63498
63499@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63500 switch (result) {
63501 /* entry okay as is */
63502 case FSCACHE_CHECKAUX_OKAY:
63503- fscache_stat(&fscache_n_checkaux_okay);
63504+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63505 break;
63506
63507 /* entry requires update */
63508 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63509- fscache_stat(&fscache_n_checkaux_update);
63510+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63511 break;
63512
63513 /* entry requires deletion */
63514 case FSCACHE_CHECKAUX_OBSOLETE:
63515- fscache_stat(&fscache_n_checkaux_obsolete);
63516+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63517 break;
63518
63519 default:
63520@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63521 {
63522 const struct fscache_state *s;
63523
63524- fscache_stat(&fscache_n_invalidates_run);
63525+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63526 fscache_stat(&fscache_n_cop_invalidate_object);
63527 s = _fscache_invalidate_object(object, event);
63528 fscache_stat_d(&fscache_n_cop_invalidate_object);
63529@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63530 {
63531 _enter("{OBJ%x},%d", object->debug_id, event);
63532
63533- fscache_stat(&fscache_n_updates_run);
63534+ fscache_stat_unchecked(&fscache_n_updates_run);
63535 fscache_stat(&fscache_n_cop_update_object);
63536 object->cache->ops->update_object(object);
63537 fscache_stat_d(&fscache_n_cop_update_object);
63538diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63539index e7b87a0..a85d47a 100644
63540--- a/fs/fscache/operation.c
63541+++ b/fs/fscache/operation.c
63542@@ -17,7 +17,7 @@
63543 #include <linux/slab.h>
63544 #include "internal.h"
63545
63546-atomic_t fscache_op_debug_id;
63547+atomic_unchecked_t fscache_op_debug_id;
63548 EXPORT_SYMBOL(fscache_op_debug_id);
63549
63550 /**
63551@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63552 ASSERTCMP(atomic_read(&op->usage), >, 0);
63553 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63554
63555- fscache_stat(&fscache_n_op_enqueue);
63556+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63557 switch (op->flags & FSCACHE_OP_TYPE) {
63558 case FSCACHE_OP_ASYNC:
63559 _debug("queue async");
63560@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63561 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63562 if (op->processor)
63563 fscache_enqueue_operation(op);
63564- fscache_stat(&fscache_n_op_run);
63565+ fscache_stat_unchecked(&fscache_n_op_run);
63566 }
63567
63568 /*
63569@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63570 if (object->n_in_progress > 0) {
63571 atomic_inc(&op->usage);
63572 list_add_tail(&op->pend_link, &object->pending_ops);
63573- fscache_stat(&fscache_n_op_pend);
63574+ fscache_stat_unchecked(&fscache_n_op_pend);
63575 } else if (!list_empty(&object->pending_ops)) {
63576 atomic_inc(&op->usage);
63577 list_add_tail(&op->pend_link, &object->pending_ops);
63578- fscache_stat(&fscache_n_op_pend);
63579+ fscache_stat_unchecked(&fscache_n_op_pend);
63580 fscache_start_operations(object);
63581 } else {
63582 ASSERTCMP(object->n_in_progress, ==, 0);
63583@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63584 object->n_exclusive++; /* reads and writes must wait */
63585 atomic_inc(&op->usage);
63586 list_add_tail(&op->pend_link, &object->pending_ops);
63587- fscache_stat(&fscache_n_op_pend);
63588+ fscache_stat_unchecked(&fscache_n_op_pend);
63589 ret = 0;
63590 } else {
63591 /* If we're in any other state, there must have been an I/O
63592@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
63593 if (object->n_exclusive > 0) {
63594 atomic_inc(&op->usage);
63595 list_add_tail(&op->pend_link, &object->pending_ops);
63596- fscache_stat(&fscache_n_op_pend);
63597+ fscache_stat_unchecked(&fscache_n_op_pend);
63598 } else if (!list_empty(&object->pending_ops)) {
63599 atomic_inc(&op->usage);
63600 list_add_tail(&op->pend_link, &object->pending_ops);
63601- fscache_stat(&fscache_n_op_pend);
63602+ fscache_stat_unchecked(&fscache_n_op_pend);
63603 fscache_start_operations(object);
63604 } else {
63605 ASSERTCMP(object->n_exclusive, ==, 0);
63606@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63607 object->n_ops++;
63608 atomic_inc(&op->usage);
63609 list_add_tail(&op->pend_link, &object->pending_ops);
63610- fscache_stat(&fscache_n_op_pend);
63611+ fscache_stat_unchecked(&fscache_n_op_pend);
63612 ret = 0;
63613 } else if (fscache_object_is_dying(object)) {
63614- fscache_stat(&fscache_n_op_rejected);
63615+ fscache_stat_unchecked(&fscache_n_op_rejected);
63616 op->state = FSCACHE_OP_ST_CANCELLED;
63617 ret = -ENOBUFS;
63618 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63619@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63620 ret = -EBUSY;
63621 if (op->state == FSCACHE_OP_ST_PENDING) {
63622 ASSERT(!list_empty(&op->pend_link));
63623- fscache_stat(&fscache_n_op_cancelled);
63624+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63625 list_del_init(&op->pend_link);
63626 if (do_cancel)
63627 do_cancel(op);
63628@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63629 while (!list_empty(&object->pending_ops)) {
63630 op = list_entry(object->pending_ops.next,
63631 struct fscache_operation, pend_link);
63632- fscache_stat(&fscache_n_op_cancelled);
63633+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63634 list_del_init(&op->pend_link);
63635
63636 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63637@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63638 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63639 op->state = FSCACHE_OP_ST_DEAD;
63640
63641- fscache_stat(&fscache_n_op_release);
63642+ fscache_stat_unchecked(&fscache_n_op_release);
63643
63644 if (op->release) {
63645 op->release(op);
63646@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63647 * lock, and defer it otherwise */
63648 if (!spin_trylock(&object->lock)) {
63649 _debug("defer put");
63650- fscache_stat(&fscache_n_op_deferred_release);
63651+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63652
63653 cache = object->cache;
63654 spin_lock(&cache->op_gc_list_lock);
63655@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63656
63657 _debug("GC DEFERRED REL OBJ%x OP%x",
63658 object->debug_id, op->debug_id);
63659- fscache_stat(&fscache_n_op_gc);
63660+ fscache_stat_unchecked(&fscache_n_op_gc);
63661
63662 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63663 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
63664diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63665index de33b3f..8be4d29 100644
63666--- a/fs/fscache/page.c
63667+++ b/fs/fscache/page.c
63668@@ -74,7 +74,7 @@ try_again:
63669 val = radix_tree_lookup(&cookie->stores, page->index);
63670 if (!val) {
63671 rcu_read_unlock();
63672- fscache_stat(&fscache_n_store_vmscan_not_storing);
63673+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63674 __fscache_uncache_page(cookie, page);
63675 return true;
63676 }
63677@@ -104,11 +104,11 @@ try_again:
63678 spin_unlock(&cookie->stores_lock);
63679
63680 if (xpage) {
63681- fscache_stat(&fscache_n_store_vmscan_cancelled);
63682- fscache_stat(&fscache_n_store_radix_deletes);
63683+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63684+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63685 ASSERTCMP(xpage, ==, page);
63686 } else {
63687- fscache_stat(&fscache_n_store_vmscan_gone);
63688+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63689 }
63690
63691 wake_up_bit(&cookie->flags, 0);
63692@@ -123,11 +123,11 @@ page_busy:
63693 * sleeping on memory allocation, so we may need to impose a timeout
63694 * too. */
63695 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63696- fscache_stat(&fscache_n_store_vmscan_busy);
63697+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63698 return false;
63699 }
63700
63701- fscache_stat(&fscache_n_store_vmscan_wait);
63702+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63703 if (!release_page_wait_timeout(cookie, page))
63704 _debug("fscache writeout timeout page: %p{%lx}",
63705 page, page->index);
63706@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63707 FSCACHE_COOKIE_STORING_TAG);
63708 if (!radix_tree_tag_get(&cookie->stores, page->index,
63709 FSCACHE_COOKIE_PENDING_TAG)) {
63710- fscache_stat(&fscache_n_store_radix_deletes);
63711+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63712 xpage = radix_tree_delete(&cookie->stores, page->index);
63713 }
63714 spin_unlock(&cookie->stores_lock);
63715@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63716
63717 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63718
63719- fscache_stat(&fscache_n_attr_changed_calls);
63720+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63721
63722 if (fscache_object_is_active(object)) {
63723 fscache_stat(&fscache_n_cop_attr_changed);
63724@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63725
63726 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63727
63728- fscache_stat(&fscache_n_attr_changed);
63729+ fscache_stat_unchecked(&fscache_n_attr_changed);
63730
63731 op = kzalloc(sizeof(*op), GFP_KERNEL);
63732 if (!op) {
63733- fscache_stat(&fscache_n_attr_changed_nomem);
63734+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63735 _leave(" = -ENOMEM");
63736 return -ENOMEM;
63737 }
63738@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63739 if (fscache_submit_exclusive_op(object, op) < 0)
63740 goto nobufs_dec;
63741 spin_unlock(&cookie->lock);
63742- fscache_stat(&fscache_n_attr_changed_ok);
63743+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63744 fscache_put_operation(op);
63745 _leave(" = 0");
63746 return 0;
63747@@ -242,7 +242,7 @@ nobufs:
63748 kfree(op);
63749 if (wake_cookie)
63750 __fscache_wake_unused_cookie(cookie);
63751- fscache_stat(&fscache_n_attr_changed_nobufs);
63752+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63753 _leave(" = %d", -ENOBUFS);
63754 return -ENOBUFS;
63755 }
63756@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63757 /* allocate a retrieval operation and attempt to submit it */
63758 op = kzalloc(sizeof(*op), GFP_NOIO);
63759 if (!op) {
63760- fscache_stat(&fscache_n_retrievals_nomem);
63761+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63762 return NULL;
63763 }
63764
63765@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63766 return 0;
63767 }
63768
63769- fscache_stat(&fscache_n_retrievals_wait);
63770+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63771
63772 jif = jiffies;
63773 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63774 TASK_INTERRUPTIBLE) != 0) {
63775- fscache_stat(&fscache_n_retrievals_intr);
63776+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63777 _leave(" = -ERESTARTSYS");
63778 return -ERESTARTSYS;
63779 }
63780@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63781 */
63782 int fscache_wait_for_operation_activation(struct fscache_object *object,
63783 struct fscache_operation *op,
63784- atomic_t *stat_op_waits,
63785- atomic_t *stat_object_dead,
63786+ atomic_unchecked_t *stat_op_waits,
63787+ atomic_unchecked_t *stat_object_dead,
63788 void (*do_cancel)(struct fscache_operation *))
63789 {
63790 int ret;
63791@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63792
63793 _debug(">>> WT");
63794 if (stat_op_waits)
63795- fscache_stat(stat_op_waits);
63796+ fscache_stat_unchecked(stat_op_waits);
63797 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63798 TASK_INTERRUPTIBLE) != 0) {
63799 ret = fscache_cancel_op(op, do_cancel);
63800@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63801 check_if_dead:
63802 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63803 if (stat_object_dead)
63804- fscache_stat(stat_object_dead);
63805+ fscache_stat_unchecked(stat_object_dead);
63806 _leave(" = -ENOBUFS [cancelled]");
63807 return -ENOBUFS;
63808 }
63809@@ -381,7 +381,7 @@ check_if_dead:
63810 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63811 fscache_cancel_op(op, do_cancel);
63812 if (stat_object_dead)
63813- fscache_stat(stat_object_dead);
63814+ fscache_stat_unchecked(stat_object_dead);
63815 return -ENOBUFS;
63816 }
63817 return 0;
63818@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63819
63820 _enter("%p,%p,,,", cookie, page);
63821
63822- fscache_stat(&fscache_n_retrievals);
63823+ fscache_stat_unchecked(&fscache_n_retrievals);
63824
63825 if (hlist_empty(&cookie->backing_objects))
63826 goto nobufs;
63827@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63828 goto nobufs_unlock_dec;
63829 spin_unlock(&cookie->lock);
63830
63831- fscache_stat(&fscache_n_retrieval_ops);
63832+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63833
63834 /* pin the netfs read context in case we need to do the actual netfs
63835 * read because we've encountered a cache read failure */
63836@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63837
63838 error:
63839 if (ret == -ENOMEM)
63840- fscache_stat(&fscache_n_retrievals_nomem);
63841+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63842 else if (ret == -ERESTARTSYS)
63843- fscache_stat(&fscache_n_retrievals_intr);
63844+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63845 else if (ret == -ENODATA)
63846- fscache_stat(&fscache_n_retrievals_nodata);
63847+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63848 else if (ret < 0)
63849- fscache_stat(&fscache_n_retrievals_nobufs);
63850+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63851 else
63852- fscache_stat(&fscache_n_retrievals_ok);
63853+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63854
63855 fscache_put_retrieval(op);
63856 _leave(" = %d", ret);
63857@@ -505,7 +505,7 @@ nobufs_unlock:
63858 __fscache_wake_unused_cookie(cookie);
63859 kfree(op);
63860 nobufs:
63861- fscache_stat(&fscache_n_retrievals_nobufs);
63862+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63863 _leave(" = -ENOBUFS");
63864 return -ENOBUFS;
63865 }
63866@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63867
63868 _enter("%p,,%d,,,", cookie, *nr_pages);
63869
63870- fscache_stat(&fscache_n_retrievals);
63871+ fscache_stat_unchecked(&fscache_n_retrievals);
63872
63873 if (hlist_empty(&cookie->backing_objects))
63874 goto nobufs;
63875@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63876 goto nobufs_unlock_dec;
63877 spin_unlock(&cookie->lock);
63878
63879- fscache_stat(&fscache_n_retrieval_ops);
63880+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63881
63882 /* pin the netfs read context in case we need to do the actual netfs
63883 * read because we've encountered a cache read failure */
63884@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63885
63886 error:
63887 if (ret == -ENOMEM)
63888- fscache_stat(&fscache_n_retrievals_nomem);
63889+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63890 else if (ret == -ERESTARTSYS)
63891- fscache_stat(&fscache_n_retrievals_intr);
63892+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63893 else if (ret == -ENODATA)
63894- fscache_stat(&fscache_n_retrievals_nodata);
63895+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63896 else if (ret < 0)
63897- fscache_stat(&fscache_n_retrievals_nobufs);
63898+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63899 else
63900- fscache_stat(&fscache_n_retrievals_ok);
63901+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63902
63903 fscache_put_retrieval(op);
63904 _leave(" = %d", ret);
63905@@ -636,7 +636,7 @@ nobufs_unlock:
63906 if (wake_cookie)
63907 __fscache_wake_unused_cookie(cookie);
63908 nobufs:
63909- fscache_stat(&fscache_n_retrievals_nobufs);
63910+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63911 _leave(" = -ENOBUFS");
63912 return -ENOBUFS;
63913 }
63914@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63915
63916 _enter("%p,%p,,,", cookie, page);
63917
63918- fscache_stat(&fscache_n_allocs);
63919+ fscache_stat_unchecked(&fscache_n_allocs);
63920
63921 if (hlist_empty(&cookie->backing_objects))
63922 goto nobufs;
63923@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63924 goto nobufs_unlock_dec;
63925 spin_unlock(&cookie->lock);
63926
63927- fscache_stat(&fscache_n_alloc_ops);
63928+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63929
63930 ret = fscache_wait_for_operation_activation(
63931 object, &op->op,
63932@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63933
63934 error:
63935 if (ret == -ERESTARTSYS)
63936- fscache_stat(&fscache_n_allocs_intr);
63937+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63938 else if (ret < 0)
63939- fscache_stat(&fscache_n_allocs_nobufs);
63940+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63941 else
63942- fscache_stat(&fscache_n_allocs_ok);
63943+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63944
63945 fscache_put_retrieval(op);
63946 _leave(" = %d", ret);
63947@@ -730,7 +730,7 @@ nobufs_unlock:
63948 if (wake_cookie)
63949 __fscache_wake_unused_cookie(cookie);
63950 nobufs:
63951- fscache_stat(&fscache_n_allocs_nobufs);
63952+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63953 _leave(" = -ENOBUFS");
63954 return -ENOBUFS;
63955 }
63956@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63957
63958 spin_lock(&cookie->stores_lock);
63959
63960- fscache_stat(&fscache_n_store_calls);
63961+ fscache_stat_unchecked(&fscache_n_store_calls);
63962
63963 /* find a page to store */
63964 page = NULL;
63965@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63966 page = results[0];
63967 _debug("gang %d [%lx]", n, page->index);
63968 if (page->index > op->store_limit) {
63969- fscache_stat(&fscache_n_store_pages_over_limit);
63970+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63971 goto superseded;
63972 }
63973
63974@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63975 spin_unlock(&cookie->stores_lock);
63976 spin_unlock(&object->lock);
63977
63978- fscache_stat(&fscache_n_store_pages);
63979+ fscache_stat_unchecked(&fscache_n_store_pages);
63980 fscache_stat(&fscache_n_cop_write_page);
63981 ret = object->cache->ops->write_page(op, page);
63982 fscache_stat_d(&fscache_n_cop_write_page);
63983@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63984 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63985 ASSERT(PageFsCache(page));
63986
63987- fscache_stat(&fscache_n_stores);
63988+ fscache_stat_unchecked(&fscache_n_stores);
63989
63990 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63991 _leave(" = -ENOBUFS [invalidating]");
63992@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63993 spin_unlock(&cookie->stores_lock);
63994 spin_unlock(&object->lock);
63995
63996- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63997+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63998 op->store_limit = object->store_limit;
63999
64000 __fscache_use_cookie(cookie);
64001@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64002
64003 spin_unlock(&cookie->lock);
64004 radix_tree_preload_end();
64005- fscache_stat(&fscache_n_store_ops);
64006- fscache_stat(&fscache_n_stores_ok);
64007+ fscache_stat_unchecked(&fscache_n_store_ops);
64008+ fscache_stat_unchecked(&fscache_n_stores_ok);
64009
64010 /* the work queue now carries its own ref on the object */
64011 fscache_put_operation(&op->op);
64012@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64013 return 0;
64014
64015 already_queued:
64016- fscache_stat(&fscache_n_stores_again);
64017+ fscache_stat_unchecked(&fscache_n_stores_again);
64018 already_pending:
64019 spin_unlock(&cookie->stores_lock);
64020 spin_unlock(&object->lock);
64021 spin_unlock(&cookie->lock);
64022 radix_tree_preload_end();
64023 kfree(op);
64024- fscache_stat(&fscache_n_stores_ok);
64025+ fscache_stat_unchecked(&fscache_n_stores_ok);
64026 _leave(" = 0");
64027 return 0;
64028
64029@@ -1039,14 +1039,14 @@ nobufs:
64030 kfree(op);
64031 if (wake_cookie)
64032 __fscache_wake_unused_cookie(cookie);
64033- fscache_stat(&fscache_n_stores_nobufs);
64034+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64035 _leave(" = -ENOBUFS");
64036 return -ENOBUFS;
64037
64038 nomem_free:
64039 kfree(op);
64040 nomem:
64041- fscache_stat(&fscache_n_stores_oom);
64042+ fscache_stat_unchecked(&fscache_n_stores_oom);
64043 _leave(" = -ENOMEM");
64044 return -ENOMEM;
64045 }
64046@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64047 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64048 ASSERTCMP(page, !=, NULL);
64049
64050- fscache_stat(&fscache_n_uncaches);
64051+ fscache_stat_unchecked(&fscache_n_uncaches);
64052
64053 /* cache withdrawal may beat us to it */
64054 if (!PageFsCache(page))
64055@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64056 struct fscache_cookie *cookie = op->op.object->cookie;
64057
64058 #ifdef CONFIG_FSCACHE_STATS
64059- atomic_inc(&fscache_n_marks);
64060+ atomic_inc_unchecked(&fscache_n_marks);
64061 #endif
64062
64063 _debug("- mark %p{%lx}", page, page->index);
64064diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64065index 40d13c7..ddf52b9 100644
64066--- a/fs/fscache/stats.c
64067+++ b/fs/fscache/stats.c
64068@@ -18,99 +18,99 @@
64069 /*
64070 * operation counters
64071 */
64072-atomic_t fscache_n_op_pend;
64073-atomic_t fscache_n_op_run;
64074-atomic_t fscache_n_op_enqueue;
64075-atomic_t fscache_n_op_requeue;
64076-atomic_t fscache_n_op_deferred_release;
64077-atomic_t fscache_n_op_release;
64078-atomic_t fscache_n_op_gc;
64079-atomic_t fscache_n_op_cancelled;
64080-atomic_t fscache_n_op_rejected;
64081+atomic_unchecked_t fscache_n_op_pend;
64082+atomic_unchecked_t fscache_n_op_run;
64083+atomic_unchecked_t fscache_n_op_enqueue;
64084+atomic_unchecked_t fscache_n_op_requeue;
64085+atomic_unchecked_t fscache_n_op_deferred_release;
64086+atomic_unchecked_t fscache_n_op_release;
64087+atomic_unchecked_t fscache_n_op_gc;
64088+atomic_unchecked_t fscache_n_op_cancelled;
64089+atomic_unchecked_t fscache_n_op_rejected;
64090
64091-atomic_t fscache_n_attr_changed;
64092-atomic_t fscache_n_attr_changed_ok;
64093-atomic_t fscache_n_attr_changed_nobufs;
64094-atomic_t fscache_n_attr_changed_nomem;
64095-atomic_t fscache_n_attr_changed_calls;
64096+atomic_unchecked_t fscache_n_attr_changed;
64097+atomic_unchecked_t fscache_n_attr_changed_ok;
64098+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64099+atomic_unchecked_t fscache_n_attr_changed_nomem;
64100+atomic_unchecked_t fscache_n_attr_changed_calls;
64101
64102-atomic_t fscache_n_allocs;
64103-atomic_t fscache_n_allocs_ok;
64104-atomic_t fscache_n_allocs_wait;
64105-atomic_t fscache_n_allocs_nobufs;
64106-atomic_t fscache_n_allocs_intr;
64107-atomic_t fscache_n_allocs_object_dead;
64108-atomic_t fscache_n_alloc_ops;
64109-atomic_t fscache_n_alloc_op_waits;
64110+atomic_unchecked_t fscache_n_allocs;
64111+atomic_unchecked_t fscache_n_allocs_ok;
64112+atomic_unchecked_t fscache_n_allocs_wait;
64113+atomic_unchecked_t fscache_n_allocs_nobufs;
64114+atomic_unchecked_t fscache_n_allocs_intr;
64115+atomic_unchecked_t fscache_n_allocs_object_dead;
64116+atomic_unchecked_t fscache_n_alloc_ops;
64117+atomic_unchecked_t fscache_n_alloc_op_waits;
64118
64119-atomic_t fscache_n_retrievals;
64120-atomic_t fscache_n_retrievals_ok;
64121-atomic_t fscache_n_retrievals_wait;
64122-atomic_t fscache_n_retrievals_nodata;
64123-atomic_t fscache_n_retrievals_nobufs;
64124-atomic_t fscache_n_retrievals_intr;
64125-atomic_t fscache_n_retrievals_nomem;
64126-atomic_t fscache_n_retrievals_object_dead;
64127-atomic_t fscache_n_retrieval_ops;
64128-atomic_t fscache_n_retrieval_op_waits;
64129+atomic_unchecked_t fscache_n_retrievals;
64130+atomic_unchecked_t fscache_n_retrievals_ok;
64131+atomic_unchecked_t fscache_n_retrievals_wait;
64132+atomic_unchecked_t fscache_n_retrievals_nodata;
64133+atomic_unchecked_t fscache_n_retrievals_nobufs;
64134+atomic_unchecked_t fscache_n_retrievals_intr;
64135+atomic_unchecked_t fscache_n_retrievals_nomem;
64136+atomic_unchecked_t fscache_n_retrievals_object_dead;
64137+atomic_unchecked_t fscache_n_retrieval_ops;
64138+atomic_unchecked_t fscache_n_retrieval_op_waits;
64139
64140-atomic_t fscache_n_stores;
64141-atomic_t fscache_n_stores_ok;
64142-atomic_t fscache_n_stores_again;
64143-atomic_t fscache_n_stores_nobufs;
64144-atomic_t fscache_n_stores_oom;
64145-atomic_t fscache_n_store_ops;
64146-atomic_t fscache_n_store_calls;
64147-atomic_t fscache_n_store_pages;
64148-atomic_t fscache_n_store_radix_deletes;
64149-atomic_t fscache_n_store_pages_over_limit;
64150+atomic_unchecked_t fscache_n_stores;
64151+atomic_unchecked_t fscache_n_stores_ok;
64152+atomic_unchecked_t fscache_n_stores_again;
64153+atomic_unchecked_t fscache_n_stores_nobufs;
64154+atomic_unchecked_t fscache_n_stores_oom;
64155+atomic_unchecked_t fscache_n_store_ops;
64156+atomic_unchecked_t fscache_n_store_calls;
64157+atomic_unchecked_t fscache_n_store_pages;
64158+atomic_unchecked_t fscache_n_store_radix_deletes;
64159+atomic_unchecked_t fscache_n_store_pages_over_limit;
64160
64161-atomic_t fscache_n_store_vmscan_not_storing;
64162-atomic_t fscache_n_store_vmscan_gone;
64163-atomic_t fscache_n_store_vmscan_busy;
64164-atomic_t fscache_n_store_vmscan_cancelled;
64165-atomic_t fscache_n_store_vmscan_wait;
64166+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64167+atomic_unchecked_t fscache_n_store_vmscan_gone;
64168+atomic_unchecked_t fscache_n_store_vmscan_busy;
64169+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64170+atomic_unchecked_t fscache_n_store_vmscan_wait;
64171
64172-atomic_t fscache_n_marks;
64173-atomic_t fscache_n_uncaches;
64174+atomic_unchecked_t fscache_n_marks;
64175+atomic_unchecked_t fscache_n_uncaches;
64176
64177-atomic_t fscache_n_acquires;
64178-atomic_t fscache_n_acquires_null;
64179-atomic_t fscache_n_acquires_no_cache;
64180-atomic_t fscache_n_acquires_ok;
64181-atomic_t fscache_n_acquires_nobufs;
64182-atomic_t fscache_n_acquires_oom;
64183+atomic_unchecked_t fscache_n_acquires;
64184+atomic_unchecked_t fscache_n_acquires_null;
64185+atomic_unchecked_t fscache_n_acquires_no_cache;
64186+atomic_unchecked_t fscache_n_acquires_ok;
64187+atomic_unchecked_t fscache_n_acquires_nobufs;
64188+atomic_unchecked_t fscache_n_acquires_oom;
64189
64190-atomic_t fscache_n_invalidates;
64191-atomic_t fscache_n_invalidates_run;
64192+atomic_unchecked_t fscache_n_invalidates;
64193+atomic_unchecked_t fscache_n_invalidates_run;
64194
64195-atomic_t fscache_n_updates;
64196-atomic_t fscache_n_updates_null;
64197-atomic_t fscache_n_updates_run;
64198+atomic_unchecked_t fscache_n_updates;
64199+atomic_unchecked_t fscache_n_updates_null;
64200+atomic_unchecked_t fscache_n_updates_run;
64201
64202-atomic_t fscache_n_relinquishes;
64203-atomic_t fscache_n_relinquishes_null;
64204-atomic_t fscache_n_relinquishes_waitcrt;
64205-atomic_t fscache_n_relinquishes_retire;
64206+atomic_unchecked_t fscache_n_relinquishes;
64207+atomic_unchecked_t fscache_n_relinquishes_null;
64208+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64209+atomic_unchecked_t fscache_n_relinquishes_retire;
64210
64211-atomic_t fscache_n_cookie_index;
64212-atomic_t fscache_n_cookie_data;
64213-atomic_t fscache_n_cookie_special;
64214+atomic_unchecked_t fscache_n_cookie_index;
64215+atomic_unchecked_t fscache_n_cookie_data;
64216+atomic_unchecked_t fscache_n_cookie_special;
64217
64218-atomic_t fscache_n_object_alloc;
64219-atomic_t fscache_n_object_no_alloc;
64220-atomic_t fscache_n_object_lookups;
64221-atomic_t fscache_n_object_lookups_negative;
64222-atomic_t fscache_n_object_lookups_positive;
64223-atomic_t fscache_n_object_lookups_timed_out;
64224-atomic_t fscache_n_object_created;
64225-atomic_t fscache_n_object_avail;
64226-atomic_t fscache_n_object_dead;
64227+atomic_unchecked_t fscache_n_object_alloc;
64228+atomic_unchecked_t fscache_n_object_no_alloc;
64229+atomic_unchecked_t fscache_n_object_lookups;
64230+atomic_unchecked_t fscache_n_object_lookups_negative;
64231+atomic_unchecked_t fscache_n_object_lookups_positive;
64232+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64233+atomic_unchecked_t fscache_n_object_created;
64234+atomic_unchecked_t fscache_n_object_avail;
64235+atomic_unchecked_t fscache_n_object_dead;
64236
64237-atomic_t fscache_n_checkaux_none;
64238-atomic_t fscache_n_checkaux_okay;
64239-atomic_t fscache_n_checkaux_update;
64240-atomic_t fscache_n_checkaux_obsolete;
64241+atomic_unchecked_t fscache_n_checkaux_none;
64242+atomic_unchecked_t fscache_n_checkaux_okay;
64243+atomic_unchecked_t fscache_n_checkaux_update;
64244+atomic_unchecked_t fscache_n_checkaux_obsolete;
64245
64246 atomic_t fscache_n_cop_alloc_object;
64247 atomic_t fscache_n_cop_lookup_object;
64248@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64249 seq_puts(m, "FS-Cache statistics\n");
64250
64251 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64252- atomic_read(&fscache_n_cookie_index),
64253- atomic_read(&fscache_n_cookie_data),
64254- atomic_read(&fscache_n_cookie_special));
64255+ atomic_read_unchecked(&fscache_n_cookie_index),
64256+ atomic_read_unchecked(&fscache_n_cookie_data),
64257+ atomic_read_unchecked(&fscache_n_cookie_special));
64258
64259 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64260- atomic_read(&fscache_n_object_alloc),
64261- atomic_read(&fscache_n_object_no_alloc),
64262- atomic_read(&fscache_n_object_avail),
64263- atomic_read(&fscache_n_object_dead));
64264+ atomic_read_unchecked(&fscache_n_object_alloc),
64265+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64266+ atomic_read_unchecked(&fscache_n_object_avail),
64267+ atomic_read_unchecked(&fscache_n_object_dead));
64268 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64269- atomic_read(&fscache_n_checkaux_none),
64270- atomic_read(&fscache_n_checkaux_okay),
64271- atomic_read(&fscache_n_checkaux_update),
64272- atomic_read(&fscache_n_checkaux_obsolete));
64273+ atomic_read_unchecked(&fscache_n_checkaux_none),
64274+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64275+ atomic_read_unchecked(&fscache_n_checkaux_update),
64276+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64277
64278 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64279- atomic_read(&fscache_n_marks),
64280- atomic_read(&fscache_n_uncaches));
64281+ atomic_read_unchecked(&fscache_n_marks),
64282+ atomic_read_unchecked(&fscache_n_uncaches));
64283
64284 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64285 " oom=%u\n",
64286- atomic_read(&fscache_n_acquires),
64287- atomic_read(&fscache_n_acquires_null),
64288- atomic_read(&fscache_n_acquires_no_cache),
64289- atomic_read(&fscache_n_acquires_ok),
64290- atomic_read(&fscache_n_acquires_nobufs),
64291- atomic_read(&fscache_n_acquires_oom));
64292+ atomic_read_unchecked(&fscache_n_acquires),
64293+ atomic_read_unchecked(&fscache_n_acquires_null),
64294+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64295+ atomic_read_unchecked(&fscache_n_acquires_ok),
64296+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64297+ atomic_read_unchecked(&fscache_n_acquires_oom));
64298
64299 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64300- atomic_read(&fscache_n_object_lookups),
64301- atomic_read(&fscache_n_object_lookups_negative),
64302- atomic_read(&fscache_n_object_lookups_positive),
64303- atomic_read(&fscache_n_object_created),
64304- atomic_read(&fscache_n_object_lookups_timed_out));
64305+ atomic_read_unchecked(&fscache_n_object_lookups),
64306+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64307+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64308+ atomic_read_unchecked(&fscache_n_object_created),
64309+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64310
64311 seq_printf(m, "Invals : n=%u run=%u\n",
64312- atomic_read(&fscache_n_invalidates),
64313- atomic_read(&fscache_n_invalidates_run));
64314+ atomic_read_unchecked(&fscache_n_invalidates),
64315+ atomic_read_unchecked(&fscache_n_invalidates_run));
64316
64317 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64318- atomic_read(&fscache_n_updates),
64319- atomic_read(&fscache_n_updates_null),
64320- atomic_read(&fscache_n_updates_run));
64321+ atomic_read_unchecked(&fscache_n_updates),
64322+ atomic_read_unchecked(&fscache_n_updates_null),
64323+ atomic_read_unchecked(&fscache_n_updates_run));
64324
64325 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64326- atomic_read(&fscache_n_relinquishes),
64327- atomic_read(&fscache_n_relinquishes_null),
64328- atomic_read(&fscache_n_relinquishes_waitcrt),
64329- atomic_read(&fscache_n_relinquishes_retire));
64330+ atomic_read_unchecked(&fscache_n_relinquishes),
64331+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64332+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64333+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64334
64335 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64336- atomic_read(&fscache_n_attr_changed),
64337- atomic_read(&fscache_n_attr_changed_ok),
64338- atomic_read(&fscache_n_attr_changed_nobufs),
64339- atomic_read(&fscache_n_attr_changed_nomem),
64340- atomic_read(&fscache_n_attr_changed_calls));
64341+ atomic_read_unchecked(&fscache_n_attr_changed),
64342+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64343+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64344+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64345+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64346
64347 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64348- atomic_read(&fscache_n_allocs),
64349- atomic_read(&fscache_n_allocs_ok),
64350- atomic_read(&fscache_n_allocs_wait),
64351- atomic_read(&fscache_n_allocs_nobufs),
64352- atomic_read(&fscache_n_allocs_intr));
64353+ atomic_read_unchecked(&fscache_n_allocs),
64354+ atomic_read_unchecked(&fscache_n_allocs_ok),
64355+ atomic_read_unchecked(&fscache_n_allocs_wait),
64356+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64357+ atomic_read_unchecked(&fscache_n_allocs_intr));
64358 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64359- atomic_read(&fscache_n_alloc_ops),
64360- atomic_read(&fscache_n_alloc_op_waits),
64361- atomic_read(&fscache_n_allocs_object_dead));
64362+ atomic_read_unchecked(&fscache_n_alloc_ops),
64363+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64364+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64365
64366 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64367 " int=%u oom=%u\n",
64368- atomic_read(&fscache_n_retrievals),
64369- atomic_read(&fscache_n_retrievals_ok),
64370- atomic_read(&fscache_n_retrievals_wait),
64371- atomic_read(&fscache_n_retrievals_nodata),
64372- atomic_read(&fscache_n_retrievals_nobufs),
64373- atomic_read(&fscache_n_retrievals_intr),
64374- atomic_read(&fscache_n_retrievals_nomem));
64375+ atomic_read_unchecked(&fscache_n_retrievals),
64376+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64377+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64378+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64379+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64380+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64381+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64382 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64383- atomic_read(&fscache_n_retrieval_ops),
64384- atomic_read(&fscache_n_retrieval_op_waits),
64385- atomic_read(&fscache_n_retrievals_object_dead));
64386+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64387+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64388+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64389
64390 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64391- atomic_read(&fscache_n_stores),
64392- atomic_read(&fscache_n_stores_ok),
64393- atomic_read(&fscache_n_stores_again),
64394- atomic_read(&fscache_n_stores_nobufs),
64395- atomic_read(&fscache_n_stores_oom));
64396+ atomic_read_unchecked(&fscache_n_stores),
64397+ atomic_read_unchecked(&fscache_n_stores_ok),
64398+ atomic_read_unchecked(&fscache_n_stores_again),
64399+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64400+ atomic_read_unchecked(&fscache_n_stores_oom));
64401 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64402- atomic_read(&fscache_n_store_ops),
64403- atomic_read(&fscache_n_store_calls),
64404- atomic_read(&fscache_n_store_pages),
64405- atomic_read(&fscache_n_store_radix_deletes),
64406- atomic_read(&fscache_n_store_pages_over_limit));
64407+ atomic_read_unchecked(&fscache_n_store_ops),
64408+ atomic_read_unchecked(&fscache_n_store_calls),
64409+ atomic_read_unchecked(&fscache_n_store_pages),
64410+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64411+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64412
64413 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64414- atomic_read(&fscache_n_store_vmscan_not_storing),
64415- atomic_read(&fscache_n_store_vmscan_gone),
64416- atomic_read(&fscache_n_store_vmscan_busy),
64417- atomic_read(&fscache_n_store_vmscan_cancelled),
64418- atomic_read(&fscache_n_store_vmscan_wait));
64419+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64420+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64421+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64422+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64423+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64424
64425 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64426- atomic_read(&fscache_n_op_pend),
64427- atomic_read(&fscache_n_op_run),
64428- atomic_read(&fscache_n_op_enqueue),
64429- atomic_read(&fscache_n_op_cancelled),
64430- atomic_read(&fscache_n_op_rejected));
64431+ atomic_read_unchecked(&fscache_n_op_pend),
64432+ atomic_read_unchecked(&fscache_n_op_run),
64433+ atomic_read_unchecked(&fscache_n_op_enqueue),
64434+ atomic_read_unchecked(&fscache_n_op_cancelled),
64435+ atomic_read_unchecked(&fscache_n_op_rejected));
64436 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64437- atomic_read(&fscache_n_op_deferred_release),
64438- atomic_read(&fscache_n_op_release),
64439- atomic_read(&fscache_n_op_gc));
64440+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64441+ atomic_read_unchecked(&fscache_n_op_release),
64442+ atomic_read_unchecked(&fscache_n_op_gc));
64443
64444 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64445 atomic_read(&fscache_n_cop_alloc_object),
64446diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64447index 966ace8..030a03a 100644
64448--- a/fs/fuse/cuse.c
64449+++ b/fs/fuse/cuse.c
64450@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64451 INIT_LIST_HEAD(&cuse_conntbl[i]);
64452
64453 /* inherit and extend fuse_dev_operations */
64454- cuse_channel_fops = fuse_dev_operations;
64455- cuse_channel_fops.owner = THIS_MODULE;
64456- cuse_channel_fops.open = cuse_channel_open;
64457- cuse_channel_fops.release = cuse_channel_release;
64458+ pax_open_kernel();
64459+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64460+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64461+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64462+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64463+ pax_close_kernel();
64464
64465 cuse_class = class_create(THIS_MODULE, "cuse");
64466 if (IS_ERR(cuse_class))
64467diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64468index ca88731..8e9c55d 100644
64469--- a/fs/fuse/dev.c
64470+++ b/fs/fuse/dev.c
64471@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64472 ret = 0;
64473 pipe_lock(pipe);
64474
64475- if (!pipe->readers) {
64476+ if (!atomic_read(&pipe->readers)) {
64477 send_sig(SIGPIPE, current, 0);
64478 if (!ret)
64479 ret = -EPIPE;
64480@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64481 page_nr++;
64482 ret += buf->len;
64483
64484- if (pipe->files)
64485+ if (atomic_read(&pipe->files))
64486 do_wakeup = 1;
64487 }
64488
64489diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64490index de1d84a..fd69c0c 100644
64491--- a/fs/fuse/dir.c
64492+++ b/fs/fuse/dir.c
64493@@ -1479,7 +1479,7 @@ static char *read_link(struct dentry *dentry)
64494 return link;
64495 }
64496
64497-static void free_link(char *link)
64498+static void free_link(const char *link)
64499 {
64500 if (!IS_ERR(link))
64501 free_page((unsigned long) link);
64502diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64503index fd62cae..3494dfa 100644
64504--- a/fs/hostfs/hostfs_kern.c
64505+++ b/fs/hostfs/hostfs_kern.c
64506@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64507
64508 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64509 {
64510- char *s = nd_get_link(nd);
64511+ const char *s = nd_get_link(nd);
64512 if (!IS_ERR(s))
64513 __putname(s);
64514 }
64515diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64516index 1e2872b..7aea000 100644
64517--- a/fs/hugetlbfs/inode.c
64518+++ b/fs/hugetlbfs/inode.c
64519@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64520 struct mm_struct *mm = current->mm;
64521 struct vm_area_struct *vma;
64522 struct hstate *h = hstate_file(file);
64523+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64524 struct vm_unmapped_area_info info;
64525
64526 if (len & ~huge_page_mask(h))
64527@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64528 return addr;
64529 }
64530
64531+#ifdef CONFIG_PAX_RANDMMAP
64532+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64533+#endif
64534+
64535 if (addr) {
64536 addr = ALIGN(addr, huge_page_size(h));
64537 vma = find_vma(mm, addr);
64538- if (TASK_SIZE - len >= addr &&
64539- (!vma || addr + len <= vma->vm_start))
64540+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64541 return addr;
64542 }
64543
64544 info.flags = 0;
64545 info.length = len;
64546 info.low_limit = TASK_UNMAPPED_BASE;
64547+
64548+#ifdef CONFIG_PAX_RANDMMAP
64549+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64550+ info.low_limit += mm->delta_mmap;
64551+#endif
64552+
64553 info.high_limit = TASK_SIZE;
64554 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64555 info.align_offset = 0;
64556@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64557 };
64558 MODULE_ALIAS_FS("hugetlbfs");
64559
64560-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64561+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64562
64563 static int can_do_hugetlb_shm(void)
64564 {
64565diff --git a/fs/inode.c b/fs/inode.c
64566index 26753ba..d19eb34 100644
64567--- a/fs/inode.c
64568+++ b/fs/inode.c
64569@@ -840,16 +840,20 @@ unsigned int get_next_ino(void)
64570 unsigned int *p = &get_cpu_var(last_ino);
64571 unsigned int res = *p;
64572
64573+start:
64574+
64575 #ifdef CONFIG_SMP
64576 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64577- static atomic_t shared_last_ino;
64578- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64579+ static atomic_unchecked_t shared_last_ino;
64580+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64581
64582 res = next - LAST_INO_BATCH;
64583 }
64584 #endif
64585
64586- *p = ++res;
64587+ if (unlikely(!++res))
64588+ goto start; /* never zero */
64589+ *p = res;
64590 put_cpu_var(last_ino);
64591 return res;
64592 }
64593diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64594index 4a6cf28..d3a29d3 100644
64595--- a/fs/jffs2/erase.c
64596+++ b/fs/jffs2/erase.c
64597@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
64598 struct jffs2_unknown_node marker = {
64599 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
64600 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64601- .totlen = cpu_to_je32(c->cleanmarker_size)
64602+ .totlen = cpu_to_je32(c->cleanmarker_size),
64603+ .hdr_crc = cpu_to_je32(0)
64604 };
64605
64606 jffs2_prealloc_raw_node_refs(c, jeb, 1);
64607diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
64608index a6597d6..41b30ec 100644
64609--- a/fs/jffs2/wbuf.c
64610+++ b/fs/jffs2/wbuf.c
64611@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
64612 {
64613 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
64614 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64615- .totlen = constant_cpu_to_je32(8)
64616+ .totlen = constant_cpu_to_je32(8),
64617+ .hdr_crc = constant_cpu_to_je32(0)
64618 };
64619
64620 /*
64621diff --git a/fs/jfs/super.c b/fs/jfs/super.c
64622index adf8cb0..bb935fa 100644
64623--- a/fs/jfs/super.c
64624+++ b/fs/jfs/super.c
64625@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
64626
64627 jfs_inode_cachep =
64628 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
64629- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
64630+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
64631 init_once);
64632 if (jfs_inode_cachep == NULL)
64633 return -ENOMEM;
64634diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
64635index a693f5b..82276a1 100644
64636--- a/fs/kernfs/dir.c
64637+++ b/fs/kernfs/dir.c
64638@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
64639 *
64640 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64641 */
64642-static unsigned int kernfs_name_hash(const char *name, const void *ns)
64643+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
64644 {
64645 unsigned long hash = init_name_hash();
64646 unsigned int len = strlen(name);
64647diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
64648index 4429d6d..9831f52 100644
64649--- a/fs/kernfs/file.c
64650+++ b/fs/kernfs/file.c
64651@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
64652
64653 struct kernfs_open_node {
64654 atomic_t refcnt;
64655- atomic_t event;
64656+ atomic_unchecked_t event;
64657 wait_queue_head_t poll;
64658 struct list_head files; /* goes through kernfs_open_file.list */
64659 };
64660@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
64661 {
64662 struct kernfs_open_file *of = sf->private;
64663
64664- of->event = atomic_read(&of->kn->attr.open->event);
64665+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64666
64667 return of->kn->attr.ops->seq_show(sf, v);
64668 }
64669@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
64670 return ret;
64671 }
64672
64673-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64674- void *buf, int len, int write)
64675+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64676+ void *buf, size_t len, int write)
64677 {
64678 struct file *file = vma->vm_file;
64679 struct kernfs_open_file *of = kernfs_of(file);
64680- int ret;
64681+ ssize_t ret;
64682
64683 if (!of->vm_ops)
64684 return -EINVAL;
64685@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
64686 return -ENOMEM;
64687
64688 atomic_set(&new_on->refcnt, 0);
64689- atomic_set(&new_on->event, 1);
64690+ atomic_set_unchecked(&new_on->event, 1);
64691 init_waitqueue_head(&new_on->poll);
64692 INIT_LIST_HEAD(&new_on->files);
64693 goto retry;
64694@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64695
64696 kernfs_put_active(kn);
64697
64698- if (of->event != atomic_read(&on->event))
64699+ if (of->event != atomic_read_unchecked(&on->event))
64700 goto trigger;
64701
64702 return DEFAULT_POLLMASK;
64703@@ -818,7 +818,7 @@ repeat:
64704
64705 on = kn->attr.open;
64706 if (on) {
64707- atomic_inc(&on->event);
64708+ atomic_inc_unchecked(&on->event);
64709 wake_up_interruptible(&on->poll);
64710 }
64711
64712diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64713index 8a19889..4c3069a 100644
64714--- a/fs/kernfs/symlink.c
64715+++ b/fs/kernfs/symlink.c
64716@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64717 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64718 void *cookie)
64719 {
64720- char *page = nd_get_link(nd);
64721+ const char *page = nd_get_link(nd);
64722 if (!IS_ERR(page))
64723 free_page((unsigned long)page);
64724 }
64725diff --git a/fs/libfs.c b/fs/libfs.c
64726index 88e3e00..979c262 100644
64727--- a/fs/libfs.c
64728+++ b/fs/libfs.c
64729@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64730
64731 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64732 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
64733+ char d_name[sizeof(next->d_iname)];
64734+ const unsigned char *name;
64735+
64736 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64737 if (!simple_positive(next)) {
64738 spin_unlock(&next->d_lock);
64739@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64740
64741 spin_unlock(&next->d_lock);
64742 spin_unlock(&dentry->d_lock);
64743- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64744+ name = next->d_name.name;
64745+ if (name == next->d_iname) {
64746+ memcpy(d_name, name, next->d_name.len);
64747+ name = d_name;
64748+ }
64749+ if (!dir_emit(ctx, name, next->d_name.len,
64750 next->d_inode->i_ino, dt_type(next->d_inode)))
64751 return 0;
64752 spin_lock(&dentry->d_lock);
64753@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64754 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64755 void *cookie)
64756 {
64757- char *s = nd_get_link(nd);
64758+ const char *s = nd_get_link(nd);
64759 if (!IS_ERR(s))
64760 kfree(s);
64761 }
64762diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64763index acd3947..1f896e2 100644
64764--- a/fs/lockd/clntproc.c
64765+++ b/fs/lockd/clntproc.c
64766@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64767 /*
64768 * Cookie counter for NLM requests
64769 */
64770-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64771+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64772
64773 void nlmclnt_next_cookie(struct nlm_cookie *c)
64774 {
64775- u32 cookie = atomic_inc_return(&nlm_cookie);
64776+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64777
64778 memcpy(c->data, &cookie, 4);
64779 c->len=4;
64780diff --git a/fs/locks.c b/fs/locks.c
64781index bb08857..f65e8bf 100644
64782--- a/fs/locks.c
64783+++ b/fs/locks.c
64784@@ -2350,7 +2350,7 @@ void locks_remove_file(struct file *filp)
64785 locks_remove_posix(filp, filp);
64786
64787 if (filp->f_op->flock) {
64788- struct file_lock fl = {
64789+ struct file_lock flock = {
64790 .fl_owner = filp,
64791 .fl_pid = current->tgid,
64792 .fl_file = filp,
64793@@ -2358,9 +2358,9 @@ void locks_remove_file(struct file *filp)
64794 .fl_type = F_UNLCK,
64795 .fl_end = OFFSET_MAX,
64796 };
64797- filp->f_op->flock(filp, F_SETLKW, &fl);
64798- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64799- fl.fl_ops->fl_release_private(&fl);
64800+ filp->f_op->flock(filp, F_SETLKW, &flock);
64801+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64802+ flock.fl_ops->fl_release_private(&flock);
64803 }
64804
64805 spin_lock(&inode->i_lock);
64806diff --git a/fs/mount.h b/fs/mount.h
64807index 6740a62..ccb472f 100644
64808--- a/fs/mount.h
64809+++ b/fs/mount.h
64810@@ -11,7 +11,7 @@ struct mnt_namespace {
64811 u64 seq; /* Sequence number to prevent loops */
64812 wait_queue_head_t poll;
64813 u64 event;
64814-};
64815+} __randomize_layout;
64816
64817 struct mnt_pcp {
64818 int mnt_count;
64819@@ -57,7 +57,7 @@ struct mount {
64820 int mnt_expiry_mark; /* true if marked for expiry */
64821 struct hlist_head mnt_pins;
64822 struct path mnt_ex_mountpoint;
64823-};
64824+} __randomize_layout;
64825
64826 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64827
64828diff --git a/fs/namei.c b/fs/namei.c
64829index 3ddb044..5533df9 100644
64830--- a/fs/namei.c
64831+++ b/fs/namei.c
64832@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
64833 if (ret != -EACCES)
64834 return ret;
64835
64836+#ifdef CONFIG_GRKERNSEC
64837+ /* we'll block if we have to log due to a denied capability use */
64838+ if (mask & MAY_NOT_BLOCK)
64839+ return -ECHILD;
64840+#endif
64841+
64842 if (S_ISDIR(inode->i_mode)) {
64843 /* DACs are overridable for directories */
64844- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64845- return 0;
64846 if (!(mask & MAY_WRITE))
64847- if (capable_wrt_inode_uidgid(inode,
64848- CAP_DAC_READ_SEARCH))
64849+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64850+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64851 return 0;
64852+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64853+ return 0;
64854 return -EACCES;
64855 }
64856 /*
64857+ * Searching includes executable on directories, else just read.
64858+ */
64859+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64860+ if (mask == MAY_READ)
64861+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64862+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64863+ return 0;
64864+
64865+ /*
64866 * Read/write DACs are always overridable.
64867 * Executable DACs are overridable when there is
64868 * at least one exec bit set.
64869@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
64870 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64871 return 0;
64872
64873- /*
64874- * Searching includes executable on directories, else just read.
64875- */
64876- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64877- if (mask == MAY_READ)
64878- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64879- return 0;
64880-
64881 return -EACCES;
64882 }
64883 EXPORT_SYMBOL(generic_permission);
64884@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64885 {
64886 struct dentry *dentry = link->dentry;
64887 int error;
64888- char *s;
64889+ const char *s;
64890
64891 BUG_ON(nd->flags & LOOKUP_RCU);
64892
64893@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64894 if (error)
64895 goto out_put_nd_path;
64896
64897+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64898+ dentry->d_inode, dentry, nd->path.mnt)) {
64899+ error = -EACCES;
64900+ goto out_put_nd_path;
64901+ }
64902+
64903 nd->last_type = LAST_BIND;
64904 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64905 error = PTR_ERR(*p);
64906@@ -1607,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64907 if (res)
64908 break;
64909 res = walk_component(nd, path, LOOKUP_FOLLOW);
64910+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64911+ res = -EACCES;
64912 put_link(nd, &link, cookie);
64913 } while (res > 0);
64914
64915@@ -1679,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
64916 static inline u64 hash_name(const char *name)
64917 {
64918 unsigned long a, b, adata, bdata, mask, hash, len;
64919- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64920+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64921
64922 hash = a = 0;
64923 len = -sizeof(unsigned long);
64924@@ -1968,6 +1983,8 @@ static int path_lookupat(int dfd, const char *name,
64925 if (err)
64926 break;
64927 err = lookup_last(nd, &path);
64928+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64929+ err = -EACCES;
64930 put_link(nd, &link, cookie);
64931 }
64932 }
64933@@ -1975,6 +1992,13 @@ static int path_lookupat(int dfd, const char *name,
64934 if (!err)
64935 err = complete_walk(nd);
64936
64937+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64938+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64939+ path_put(&nd->path);
64940+ err = -ENOENT;
64941+ }
64942+ }
64943+
64944 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64945 if (!d_can_lookup(nd->path.dentry)) {
64946 path_put(&nd->path);
64947@@ -2002,8 +2026,15 @@ static int filename_lookup(int dfd, struct filename *name,
64948 retval = path_lookupat(dfd, name->name,
64949 flags | LOOKUP_REVAL, nd);
64950
64951- if (likely(!retval))
64952+ if (likely(!retval)) {
64953 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64954+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64955+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64956+ path_put(&nd->path);
64957+ return -ENOENT;
64958+ }
64959+ }
64960+ }
64961 return retval;
64962 }
64963
64964@@ -2585,6 +2616,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64965 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64966 return -EPERM;
64967
64968+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64969+ return -EPERM;
64970+ if (gr_handle_rawio(inode))
64971+ return -EPERM;
64972+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64973+ return -EACCES;
64974+
64975 return 0;
64976 }
64977
64978@@ -2816,7 +2854,7 @@ looked_up:
64979 * cleared otherwise prior to returning.
64980 */
64981 static int lookup_open(struct nameidata *nd, struct path *path,
64982- struct file *file,
64983+ struct path *link, struct file *file,
64984 const struct open_flags *op,
64985 bool got_write, int *opened)
64986 {
64987@@ -2851,6 +2889,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64988 /* Negative dentry, just create the file */
64989 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64990 umode_t mode = op->mode;
64991+
64992+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64993+ error = -EACCES;
64994+ goto out_dput;
64995+ }
64996+
64997+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64998+ error = -EACCES;
64999+ goto out_dput;
65000+ }
65001+
65002 if (!IS_POSIXACL(dir->d_inode))
65003 mode &= ~current_umask();
65004 /*
65005@@ -2872,6 +2921,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65006 nd->flags & LOOKUP_EXCL);
65007 if (error)
65008 goto out_dput;
65009+ else
65010+ gr_handle_create(dentry, nd->path.mnt);
65011 }
65012 out_no_open:
65013 path->dentry = dentry;
65014@@ -2886,7 +2937,7 @@ out_dput:
65015 /*
65016 * Handle the last step of open()
65017 */
65018-static int do_last(struct nameidata *nd, struct path *path,
65019+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65020 struct file *file, const struct open_flags *op,
65021 int *opened, struct filename *name)
65022 {
65023@@ -2936,6 +2987,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65024 if (error)
65025 return error;
65026
65027+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65028+ error = -ENOENT;
65029+ goto out;
65030+ }
65031+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65032+ error = -EACCES;
65033+ goto out;
65034+ }
65035+
65036 audit_inode(name, dir, LOOKUP_PARENT);
65037 error = -EISDIR;
65038 /* trailing slashes? */
65039@@ -2955,7 +3015,7 @@ retry_lookup:
65040 */
65041 }
65042 mutex_lock(&dir->d_inode->i_mutex);
65043- error = lookup_open(nd, path, file, op, got_write, opened);
65044+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65045 mutex_unlock(&dir->d_inode->i_mutex);
65046
65047 if (error <= 0) {
65048@@ -2979,11 +3039,28 @@ retry_lookup:
65049 goto finish_open_created;
65050 }
65051
65052+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65053+ error = -ENOENT;
65054+ goto exit_dput;
65055+ }
65056+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65057+ error = -EACCES;
65058+ goto exit_dput;
65059+ }
65060+
65061 /*
65062 * create/update audit record if it already exists.
65063 */
65064- if (d_is_positive(path->dentry))
65065+ if (d_is_positive(path->dentry)) {
65066+ /* only check if O_CREAT is specified, all other checks need to go
65067+ into may_open */
65068+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65069+ error = -EACCES;
65070+ goto exit_dput;
65071+ }
65072+
65073 audit_inode(name, path->dentry, 0);
65074+ }
65075
65076 /*
65077 * If atomic_open() acquired write access it is dropped now due to
65078@@ -3024,6 +3101,11 @@ finish_lookup:
65079 }
65080 }
65081 BUG_ON(inode != path->dentry->d_inode);
65082+ /* if we're resolving a symlink to another symlink */
65083+ if (link && gr_handle_symlink_owner(link, inode)) {
65084+ error = -EACCES;
65085+ goto out;
65086+ }
65087 return 1;
65088 }
65089
65090@@ -3033,7 +3115,6 @@ finish_lookup:
65091 save_parent.dentry = nd->path.dentry;
65092 save_parent.mnt = mntget(path->mnt);
65093 nd->path.dentry = path->dentry;
65094-
65095 }
65096 nd->inode = inode;
65097 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65098@@ -3043,7 +3124,18 @@ finish_open:
65099 path_put(&save_parent);
65100 return error;
65101 }
65102+
65103+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65104+ error = -ENOENT;
65105+ goto out;
65106+ }
65107+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65108+ error = -EACCES;
65109+ goto out;
65110+ }
65111+
65112 audit_inode(name, nd->path.dentry, 0);
65113+
65114 error = -EISDIR;
65115 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65116 goto out;
65117@@ -3206,7 +3298,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65118 if (unlikely(error))
65119 goto out;
65120
65121- error = do_last(nd, &path, file, op, &opened, pathname);
65122+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65123 while (unlikely(error > 0)) { /* trailing symlink */
65124 struct path link = path;
65125 void *cookie;
65126@@ -3224,7 +3316,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65127 error = follow_link(&link, nd, &cookie);
65128 if (unlikely(error))
65129 break;
65130- error = do_last(nd, &path, file, op, &opened, pathname);
65131+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65132 put_link(nd, &link, cookie);
65133 }
65134 out:
65135@@ -3324,9 +3416,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65136 goto unlock;
65137
65138 error = -EEXIST;
65139- if (d_is_positive(dentry))
65140+ if (d_is_positive(dentry)) {
65141+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65142+ error = -ENOENT;
65143 goto fail;
65144-
65145+ }
65146 /*
65147 * Special case - lookup gave negative, but... we had foo/bar/
65148 * From the vfs_mknod() POV we just have a negative dentry -
65149@@ -3378,6 +3472,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65150 }
65151 EXPORT_SYMBOL(user_path_create);
65152
65153+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65154+{
65155+ struct filename *tmp = getname(pathname);
65156+ struct dentry *res;
65157+ if (IS_ERR(tmp))
65158+ return ERR_CAST(tmp);
65159+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65160+ if (IS_ERR(res))
65161+ putname(tmp);
65162+ else
65163+ *to = tmp;
65164+ return res;
65165+}
65166+
65167 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65168 {
65169 int error = may_create(dir, dentry);
65170@@ -3441,6 +3549,17 @@ retry:
65171
65172 if (!IS_POSIXACL(path.dentry->d_inode))
65173 mode &= ~current_umask();
65174+
65175+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65176+ error = -EPERM;
65177+ goto out;
65178+ }
65179+
65180+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65181+ error = -EACCES;
65182+ goto out;
65183+ }
65184+
65185 error = security_path_mknod(&path, dentry, mode, dev);
65186 if (error)
65187 goto out;
65188@@ -3456,6 +3575,8 @@ retry:
65189 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
65190 break;
65191 }
65192+ if (!error)
65193+ gr_handle_create(dentry, path.mnt);
65194 out:
65195 done_path_create(&path, dentry);
65196 if (retry_estale(error, lookup_flags)) {
65197@@ -3510,9 +3631,16 @@ retry:
65198
65199 if (!IS_POSIXACL(path.dentry->d_inode))
65200 mode &= ~current_umask();
65201+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65202+ error = -EACCES;
65203+ goto out;
65204+ }
65205 error = security_path_mkdir(&path, dentry, mode);
65206 if (!error)
65207 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65208+ if (!error)
65209+ gr_handle_create(dentry, path.mnt);
65210+out:
65211 done_path_create(&path, dentry);
65212 if (retry_estale(error, lookup_flags)) {
65213 lookup_flags |= LOOKUP_REVAL;
65214@@ -3595,6 +3723,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65215 struct filename *name;
65216 struct dentry *dentry;
65217 struct nameidata nd;
65218+ ino_t saved_ino = 0;
65219+ dev_t saved_dev = 0;
65220 unsigned int lookup_flags = 0;
65221 retry:
65222 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65223@@ -3627,10 +3757,21 @@ retry:
65224 error = -ENOENT;
65225 goto exit3;
65226 }
65227+
65228+ saved_ino = dentry->d_inode->i_ino;
65229+ saved_dev = gr_get_dev_from_dentry(dentry);
65230+
65231+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65232+ error = -EACCES;
65233+ goto exit3;
65234+ }
65235+
65236 error = security_path_rmdir(&nd.path, dentry);
65237 if (error)
65238 goto exit3;
65239 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65240+ if (!error && (saved_dev || saved_ino))
65241+ gr_handle_delete(saved_ino, saved_dev);
65242 exit3:
65243 dput(dentry);
65244 exit2:
65245@@ -3721,6 +3862,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65246 struct nameidata nd;
65247 struct inode *inode = NULL;
65248 struct inode *delegated_inode = NULL;
65249+ ino_t saved_ino = 0;
65250+ dev_t saved_dev = 0;
65251 unsigned int lookup_flags = 0;
65252 retry:
65253 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65254@@ -3747,10 +3890,22 @@ retry_deleg:
65255 if (d_is_negative(dentry))
65256 goto slashes;
65257 ihold(inode);
65258+
65259+ if (inode->i_nlink <= 1) {
65260+ saved_ino = inode->i_ino;
65261+ saved_dev = gr_get_dev_from_dentry(dentry);
65262+ }
65263+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65264+ error = -EACCES;
65265+ goto exit2;
65266+ }
65267+
65268 error = security_path_unlink(&nd.path, dentry);
65269 if (error)
65270 goto exit2;
65271 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65272+ if (!error && (saved_ino || saved_dev))
65273+ gr_handle_delete(saved_ino, saved_dev);
65274 exit2:
65275 dput(dentry);
65276 }
65277@@ -3839,9 +3994,17 @@ retry:
65278 if (IS_ERR(dentry))
65279 goto out_putname;
65280
65281+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65282+ error = -EACCES;
65283+ goto out;
65284+ }
65285+
65286 error = security_path_symlink(&path, dentry, from->name);
65287 if (!error)
65288 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65289+ if (!error)
65290+ gr_handle_create(dentry, path.mnt);
65291+out:
65292 done_path_create(&path, dentry);
65293 if (retry_estale(error, lookup_flags)) {
65294 lookup_flags |= LOOKUP_REVAL;
65295@@ -3945,6 +4108,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65296 struct dentry *new_dentry;
65297 struct path old_path, new_path;
65298 struct inode *delegated_inode = NULL;
65299+ struct filename *to = NULL;
65300 int how = 0;
65301 int error;
65302
65303@@ -3968,7 +4132,7 @@ retry:
65304 if (error)
65305 return error;
65306
65307- new_dentry = user_path_create(newdfd, newname, &new_path,
65308+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65309 (how & LOOKUP_REVAL));
65310 error = PTR_ERR(new_dentry);
65311 if (IS_ERR(new_dentry))
65312@@ -3980,11 +4144,28 @@ retry:
65313 error = may_linkat(&old_path);
65314 if (unlikely(error))
65315 goto out_dput;
65316+
65317+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65318+ old_path.dentry->d_inode,
65319+ old_path.dentry->d_inode->i_mode, to)) {
65320+ error = -EACCES;
65321+ goto out_dput;
65322+ }
65323+
65324+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65325+ old_path.dentry, old_path.mnt, to)) {
65326+ error = -EACCES;
65327+ goto out_dput;
65328+ }
65329+
65330 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65331 if (error)
65332 goto out_dput;
65333 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65334+ if (!error)
65335+ gr_handle_create(new_dentry, new_path.mnt);
65336 out_dput:
65337+ putname(to);
65338 done_path_create(&new_path, new_dentry);
65339 if (delegated_inode) {
65340 error = break_deleg_wait(&delegated_inode);
65341@@ -4295,6 +4476,12 @@ retry_deleg:
65342 if (new_dentry == trap)
65343 goto exit5;
65344
65345+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65346+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65347+ to, flags);
65348+ if (error)
65349+ goto exit5;
65350+
65351 error = security_path_rename(&oldnd.path, old_dentry,
65352 &newnd.path, new_dentry, flags);
65353 if (error)
65354@@ -4302,6 +4489,9 @@ retry_deleg:
65355 error = vfs_rename(old_dir->d_inode, old_dentry,
65356 new_dir->d_inode, new_dentry,
65357 &delegated_inode, flags);
65358+ if (!error)
65359+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65360+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65361 exit5:
65362 dput(new_dentry);
65363 exit4:
65364@@ -4344,14 +4534,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
65365
65366 int readlink_copy(char __user *buffer, int buflen, const char *link)
65367 {
65368+ char tmpbuf[64];
65369+ const char *newlink;
65370 int len = PTR_ERR(link);
65371+
65372 if (IS_ERR(link))
65373 goto out;
65374
65375 len = strlen(link);
65376 if (len > (unsigned) buflen)
65377 len = buflen;
65378- if (copy_to_user(buffer, link, len))
65379+
65380+ if (len < sizeof(tmpbuf)) {
65381+ memcpy(tmpbuf, link, len);
65382+ newlink = tmpbuf;
65383+ } else
65384+ newlink = link;
65385+
65386+ if (copy_to_user(buffer, newlink, len))
65387 len = -EFAULT;
65388 out:
65389 return len;
65390diff --git a/fs/namespace.c b/fs/namespace.c
65391index 7f67b46..c4ad324 100644
65392--- a/fs/namespace.c
65393+++ b/fs/namespace.c
65394@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
65395 if (!(sb->s_flags & MS_RDONLY))
65396 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65397 up_write(&sb->s_umount);
65398+
65399+ gr_log_remount(mnt->mnt_devname, retval);
65400+
65401 return retval;
65402 }
65403
65404@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags)
65405 }
65406 unlock_mount_hash();
65407 namespace_unlock();
65408+
65409+ gr_log_unmount(mnt->mnt_devname, retval);
65410+
65411 return retval;
65412 }
65413
65414@@ -1403,7 +1409,7 @@ static inline bool may_mount(void)
65415 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65416 */
65417
65418-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65419+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65420 {
65421 struct path path;
65422 struct mount *mnt;
65423@@ -1445,7 +1451,7 @@ out:
65424 /*
65425 * The 2.0 compatible umount. No flags.
65426 */
65427-SYSCALL_DEFINE1(oldumount, char __user *, name)
65428+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65429 {
65430 return sys_umount(name, 0);
65431 }
65432@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name,
65433 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65434 MS_STRICTATIME);
65435
65436+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65437+ retval = -EPERM;
65438+ goto dput_out;
65439+ }
65440+
65441+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65442+ retval = -EPERM;
65443+ goto dput_out;
65444+ }
65445+
65446 if (flags & MS_REMOUNT)
65447 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65448 data_page);
65449@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name,
65450 dev_name, data_page);
65451 dput_out:
65452 path_put(&path);
65453+
65454+ gr_log_mount(dev_name, dir_name, retval);
65455+
65456 return retval;
65457 }
65458
65459@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65460 * number incrementing at 10Ghz will take 12,427 years to wrap which
65461 * is effectively never, so we can ignore the possibility.
65462 */
65463-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65464+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65465
65466 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65467 {
65468@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65469 kfree(new_ns);
65470 return ERR_PTR(ret);
65471 }
65472- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65473+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
65474 atomic_set(&new_ns->count, 1);
65475 new_ns->root = NULL;
65476 INIT_LIST_HEAD(&new_ns->list);
65477@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65478 return new_ns;
65479 }
65480
65481-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65482+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65483 struct user_namespace *user_ns, struct fs_struct *new_fs)
65484 {
65485 struct mnt_namespace *new_ns;
65486@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65487 }
65488 EXPORT_SYMBOL(mount_subtree);
65489
65490-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65491- char __user *, type, unsigned long, flags, void __user *, data)
65492+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65493+ const char __user *, type, unsigned long, flags, void __user *, data)
65494 {
65495 int ret;
65496 char *kernel_type;
65497@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65498 if (error)
65499 goto out2;
65500
65501+ if (gr_handle_chroot_pivot()) {
65502+ error = -EPERM;
65503+ goto out2;
65504+ }
65505+
65506 get_fs_root(current->fs, &root);
65507 old_mp = lock_mount(&old);
65508 error = PTR_ERR(old_mp);
65509@@ -2822,6 +2846,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65510 /* make sure we can reach put_old from new_root */
65511 if (!is_path_reachable(old_mnt, old.dentry, &new))
65512 goto out4;
65513+ /* make certain new is below the root */
65514+ if (!is_path_reachable(new_mnt, new.dentry, &root))
65515+ goto out4;
65516 root_mp->m_count++; /* pin it so it won't go away */
65517 lock_mount_hash();
65518 detach_mnt(new_mnt, &parent_path);
65519@@ -3053,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
65520 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65521 return -EPERM;
65522
65523- if (fs->users != 1)
65524+ if (atomic_read(&fs->users) != 1)
65525 return -EINVAL;
65526
65527 get_mnt_ns(mnt_ns);
65528diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65529index f4ccfe6..a5cf064 100644
65530--- a/fs/nfs/callback_xdr.c
65531+++ b/fs/nfs/callback_xdr.c
65532@@ -51,7 +51,7 @@ struct callback_op {
65533 callback_decode_arg_t decode_args;
65534 callback_encode_res_t encode_res;
65535 long res_maxsize;
65536-};
65537+} __do_const;
65538
65539 static struct callback_op callback_ops[];
65540
65541diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65542index 577a36f..1cde799 100644
65543--- a/fs/nfs/inode.c
65544+++ b/fs/nfs/inode.c
65545@@ -1228,16 +1228,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
65546 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
65547 }
65548
65549-static atomic_long_t nfs_attr_generation_counter;
65550+static atomic_long_unchecked_t nfs_attr_generation_counter;
65551
65552 static unsigned long nfs_read_attr_generation_counter(void)
65553 {
65554- return atomic_long_read(&nfs_attr_generation_counter);
65555+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65556 }
65557
65558 unsigned long nfs_inc_attr_generation_counter(void)
65559 {
65560- return atomic_long_inc_return(&nfs_attr_generation_counter);
65561+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65562 }
65563
65564 void nfs_fattr_init(struct nfs_fattr *fattr)
65565diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65566index 5e0dc52..64681bc 100644
65567--- a/fs/nfsd/nfs4proc.c
65568+++ b/fs/nfsd/nfs4proc.c
65569@@ -1155,7 +1155,7 @@ struct nfsd4_operation {
65570 nfsd4op_rsize op_rsize_bop;
65571 stateid_getter op_get_currentstateid;
65572 stateid_setter op_set_currentstateid;
65573-};
65574+} __do_const;
65575
65576 static struct nfsd4_operation nfsd4_ops[];
65577
65578diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65579index 353aac8..32035ee 100644
65580--- a/fs/nfsd/nfs4xdr.c
65581+++ b/fs/nfsd/nfs4xdr.c
65582@@ -1534,7 +1534,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65583
65584 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65585
65586-static nfsd4_dec nfsd4_dec_ops[] = {
65587+static const nfsd4_dec nfsd4_dec_ops[] = {
65588 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65589 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
65590 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
65591diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
65592index ff95676..96cf3f62 100644
65593--- a/fs/nfsd/nfscache.c
65594+++ b/fs/nfsd/nfscache.c
65595@@ -527,17 +527,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65596 {
65597 struct svc_cacherep *rp = rqstp->rq_cacherep;
65598 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
65599- int len;
65600+ long len;
65601 size_t bufsize = 0;
65602
65603 if (!rp)
65604 return;
65605
65606- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
65607- len >>= 2;
65608+ if (statp) {
65609+ len = (char*)statp - (char*)resv->iov_base;
65610+ len = resv->iov_len - len;
65611+ len >>= 2;
65612+ }
65613
65614 /* Don't cache excessive amounts of data and XDR failures */
65615- if (!statp || len > (256 >> 2)) {
65616+ if (!statp || len > (256 >> 2) || len < 0) {
65617 nfsd_reply_cache_free(rp);
65618 return;
65619 }
65620@@ -545,7 +548,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65621 switch (cachetype) {
65622 case RC_REPLSTAT:
65623 if (len != 1)
65624- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
65625+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
65626 rp->c_replstat = *statp;
65627 break;
65628 case RC_REPLBUFF:
65629diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
65630index 6ab077b..5ac7f0b 100644
65631--- a/fs/nfsd/vfs.c
65632+++ b/fs/nfsd/vfs.c
65633@@ -855,7 +855,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
65634
65635 oldfs = get_fs();
65636 set_fs(KERNEL_DS);
65637- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
65638+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
65639 set_fs(oldfs);
65640 return nfsd_finish_read(file, count, host_err);
65641 }
65642@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
65643
65644 /* Write the data. */
65645 oldfs = get_fs(); set_fs(KERNEL_DS);
65646- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
65647+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
65648 set_fs(oldfs);
65649 if (host_err < 0)
65650 goto out_nfserr;
65651@@ -1485,7 +1485,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
65652 */
65653
65654 oldfs = get_fs(); set_fs(KERNEL_DS);
65655- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
65656+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
65657 set_fs(oldfs);
65658
65659 if (host_err < 0)
65660diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
65661index 52ccd34..7a6b202 100644
65662--- a/fs/nls/nls_base.c
65663+++ b/fs/nls/nls_base.c
65664@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
65665
65666 int __register_nls(struct nls_table *nls, struct module *owner)
65667 {
65668- struct nls_table ** tmp = &tables;
65669+ struct nls_table *tmp = tables;
65670
65671 if (nls->next)
65672 return -EBUSY;
65673
65674- nls->owner = owner;
65675+ pax_open_kernel();
65676+ *(void **)&nls->owner = owner;
65677+ pax_close_kernel();
65678 spin_lock(&nls_lock);
65679- while (*tmp) {
65680- if (nls == *tmp) {
65681+ while (tmp) {
65682+ if (nls == tmp) {
65683 spin_unlock(&nls_lock);
65684 return -EBUSY;
65685 }
65686- tmp = &(*tmp)->next;
65687+ tmp = tmp->next;
65688 }
65689- nls->next = tables;
65690+ pax_open_kernel();
65691+ *(struct nls_table **)&nls->next = tables;
65692+ pax_close_kernel();
65693 tables = nls;
65694 spin_unlock(&nls_lock);
65695 return 0;
65696@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65697
65698 int unregister_nls(struct nls_table * nls)
65699 {
65700- struct nls_table ** tmp = &tables;
65701+ struct nls_table * const * tmp = &tables;
65702
65703 spin_lock(&nls_lock);
65704 while (*tmp) {
65705 if (nls == *tmp) {
65706- *tmp = nls->next;
65707+ pax_open_kernel();
65708+ *(struct nls_table **)tmp = nls->next;
65709+ pax_close_kernel();
65710 spin_unlock(&nls_lock);
65711 return 0;
65712 }
65713@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65714 return -EINVAL;
65715 }
65716
65717-static struct nls_table *find_nls(char *charset)
65718+static struct nls_table *find_nls(const char *charset)
65719 {
65720 struct nls_table *nls;
65721 spin_lock(&nls_lock);
65722@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65723 return nls;
65724 }
65725
65726-struct nls_table *load_nls(char *charset)
65727+struct nls_table *load_nls(const char *charset)
65728 {
65729 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65730 }
65731diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65732index 162b3f1..6076a7c 100644
65733--- a/fs/nls/nls_euc-jp.c
65734+++ b/fs/nls/nls_euc-jp.c
65735@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65736 p_nls = load_nls("cp932");
65737
65738 if (p_nls) {
65739- table.charset2upper = p_nls->charset2upper;
65740- table.charset2lower = p_nls->charset2lower;
65741+ pax_open_kernel();
65742+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65743+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65744+ pax_close_kernel();
65745 return register_nls(&table);
65746 }
65747
65748diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65749index a80a741..7b96e1b 100644
65750--- a/fs/nls/nls_koi8-ru.c
65751+++ b/fs/nls/nls_koi8-ru.c
65752@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65753 p_nls = load_nls("koi8-u");
65754
65755 if (p_nls) {
65756- table.charset2upper = p_nls->charset2upper;
65757- table.charset2lower = p_nls->charset2lower;
65758+ pax_open_kernel();
65759+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65760+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65761+ pax_close_kernel();
65762 return register_nls(&table);
65763 }
65764
65765diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65766index c991616..5ae51af 100644
65767--- a/fs/notify/fanotify/fanotify_user.c
65768+++ b/fs/notify/fanotify/fanotify_user.c
65769@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65770
65771 fd = fanotify_event_metadata.fd;
65772 ret = -EFAULT;
65773- if (copy_to_user(buf, &fanotify_event_metadata,
65774- fanotify_event_metadata.event_len))
65775+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65776+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65777 goto out_close_fd;
65778
65779 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
65780diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
65781index 0f88bc0..7d888d7 100644
65782--- a/fs/notify/inotify/inotify_fsnotify.c
65783+++ b/fs/notify/inotify/inotify_fsnotify.c
65784@@ -165,8 +165,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
65785 /* ideally the idr is empty and we won't hit the BUG in the callback */
65786 idr_for_each(&group->inotify_data.idr, idr_callback, group);
65787 idr_destroy(&group->inotify_data.idr);
65788- atomic_dec(&group->inotify_data.user->inotify_devs);
65789- free_uid(group->inotify_data.user);
65790+ if (group->inotify_data.user) {
65791+ atomic_dec(&group->inotify_data.user->inotify_devs);
65792+ free_uid(group->inotify_data.user);
65793+ }
65794 }
65795
65796 static void inotify_free_event(struct fsnotify_event *fsn_event)
65797diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65798index a95d8e0..a91a5fd 100644
65799--- a/fs/notify/notification.c
65800+++ b/fs/notify/notification.c
65801@@ -48,7 +48,7 @@
65802 #include <linux/fsnotify_backend.h>
65803 #include "fsnotify.h"
65804
65805-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65806+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65807
65808 /**
65809 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65810@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65811 */
65812 u32 fsnotify_get_cookie(void)
65813 {
65814- return atomic_inc_return(&fsnotify_sync_cookie);
65815+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65816 }
65817 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65818
65819diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65820index 9e38daf..5727cae 100644
65821--- a/fs/ntfs/dir.c
65822+++ b/fs/ntfs/dir.c
65823@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65824 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65825 ~(s64)(ndir->itype.index.block_size - 1)));
65826 /* Bounds checks. */
65827- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65828+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65829 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65830 "inode 0x%lx or driver bug.", vdir->i_ino);
65831 goto err_out;
65832diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65833index f5ec1ce..807fd78 100644
65834--- a/fs/ntfs/file.c
65835+++ b/fs/ntfs/file.c
65836@@ -1279,7 +1279,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65837 char *addr;
65838 size_t total = 0;
65839 unsigned len;
65840- int left;
65841+ unsigned left;
65842
65843 do {
65844 len = PAGE_CACHE_SIZE - ofs;
65845diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65846index 6c3296e..c0b99f0 100644
65847--- a/fs/ntfs/super.c
65848+++ b/fs/ntfs/super.c
65849@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65850 if (!silent)
65851 ntfs_error(sb, "Primary boot sector is invalid.");
65852 } else if (!silent)
65853- ntfs_error(sb, read_err_str, "primary");
65854+ ntfs_error(sb, read_err_str, "%s", "primary");
65855 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65856 if (bh_primary)
65857 brelse(bh_primary);
65858@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65859 goto hotfix_primary_boot_sector;
65860 brelse(bh_backup);
65861 } else if (!silent)
65862- ntfs_error(sb, read_err_str, "backup");
65863+ ntfs_error(sb, read_err_str, "%s", "backup");
65864 /* Try to read NT3.51- backup boot sector. */
65865 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65866 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65867@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65868 "sector.");
65869 brelse(bh_backup);
65870 } else if (!silent)
65871- ntfs_error(sb, read_err_str, "backup");
65872+ ntfs_error(sb, read_err_str, "%s", "backup");
65873 /* We failed. Cleanup and return. */
65874 if (bh_primary)
65875 brelse(bh_primary);
65876diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65877index 0440134..d52c93a 100644
65878--- a/fs/ocfs2/localalloc.c
65879+++ b/fs/ocfs2/localalloc.c
65880@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65881 goto bail;
65882 }
65883
65884- atomic_inc(&osb->alloc_stats.moves);
65885+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65886
65887 bail:
65888 if (handle)
65889diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
65890index 8add6f1..b931e04 100644
65891--- a/fs/ocfs2/namei.c
65892+++ b/fs/ocfs2/namei.c
65893@@ -158,7 +158,7 @@ bail_add:
65894 * NOTE: This dentry already has ->d_op set from
65895 * ocfs2_get_parent() and ocfs2_get_dentry()
65896 */
65897- if (ret)
65898+ if (!IS_ERR_OR_NULL(ret))
65899 dentry = ret;
65900
65901 status = ocfs2_dentry_attach_lock(dentry, inode,
65902diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65903index bbec539..7b266d5 100644
65904--- a/fs/ocfs2/ocfs2.h
65905+++ b/fs/ocfs2/ocfs2.h
65906@@ -236,11 +236,11 @@ enum ocfs2_vol_state
65907
65908 struct ocfs2_alloc_stats
65909 {
65910- atomic_t moves;
65911- atomic_t local_data;
65912- atomic_t bitmap_data;
65913- atomic_t bg_allocs;
65914- atomic_t bg_extends;
65915+ atomic_unchecked_t moves;
65916+ atomic_unchecked_t local_data;
65917+ atomic_unchecked_t bitmap_data;
65918+ atomic_unchecked_t bg_allocs;
65919+ atomic_unchecked_t bg_extends;
65920 };
65921
65922 enum ocfs2_local_alloc_state
65923diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65924index 0cb889a..6a26b24 100644
65925--- a/fs/ocfs2/suballoc.c
65926+++ b/fs/ocfs2/suballoc.c
65927@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65928 mlog_errno(status);
65929 goto bail;
65930 }
65931- atomic_inc(&osb->alloc_stats.bg_extends);
65932+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65933
65934 /* You should never ask for this much metadata */
65935 BUG_ON(bits_wanted >
65936@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65937 mlog_errno(status);
65938 goto bail;
65939 }
65940- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65941+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65942
65943 *suballoc_loc = res.sr_bg_blkno;
65944 *suballoc_bit_start = res.sr_bit_offset;
65945@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65946 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65947 res->sr_bits);
65948
65949- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65950+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65951
65952 BUG_ON(res->sr_bits != 1);
65953
65954@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65955 mlog_errno(status);
65956 goto bail;
65957 }
65958- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65959+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65960
65961 BUG_ON(res.sr_bits != 1);
65962
65963@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65964 cluster_start,
65965 num_clusters);
65966 if (!status)
65967- atomic_inc(&osb->alloc_stats.local_data);
65968+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65969 } else {
65970 if (min_clusters > (osb->bitmap_cpg - 1)) {
65971 /* The only paths asking for contiguousness
65972@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65973 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65974 res.sr_bg_blkno,
65975 res.sr_bit_offset);
65976- atomic_inc(&osb->alloc_stats.bitmap_data);
65977+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65978 *num_clusters = res.sr_bits;
65979 }
65980 }
65981diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65982index 4142546..69375a9 100644
65983--- a/fs/ocfs2/super.c
65984+++ b/fs/ocfs2/super.c
65985@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65986 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65987 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65988 "Stats",
65989- atomic_read(&osb->alloc_stats.bitmap_data),
65990- atomic_read(&osb->alloc_stats.local_data),
65991- atomic_read(&osb->alloc_stats.bg_allocs),
65992- atomic_read(&osb->alloc_stats.moves),
65993- atomic_read(&osb->alloc_stats.bg_extends));
65994+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65995+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65996+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65997+ atomic_read_unchecked(&osb->alloc_stats.moves),
65998+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65999
66000 out += snprintf(buf + out, len - out,
66001 "%10s => State: %u Descriptor: %llu Size: %u bits "
66002@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66003
66004 mutex_init(&osb->system_file_mutex);
66005
66006- atomic_set(&osb->alloc_stats.moves, 0);
66007- atomic_set(&osb->alloc_stats.local_data, 0);
66008- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66009- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66010- atomic_set(&osb->alloc_stats.bg_extends, 0);
66011+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66012+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66013+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66014+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66015+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66016
66017 /* Copy the blockcheck stats from the superblock probe */
66018 osb->osb_ecc_stats = *stats;
66019diff --git a/fs/open.c b/fs/open.c
66020index d6fd3ac..6ccf474 100644
66021--- a/fs/open.c
66022+++ b/fs/open.c
66023@@ -32,6 +32,8 @@
66024 #include <linux/dnotify.h>
66025 #include <linux/compat.h>
66026
66027+#define CREATE_TRACE_POINTS
66028+#include <trace/events/fs.h>
66029 #include "internal.h"
66030
66031 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66032@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66033 error = locks_verify_truncate(inode, NULL, length);
66034 if (!error)
66035 error = security_path_truncate(path);
66036+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66037+ error = -EACCES;
66038 if (!error)
66039 error = do_truncate(path->dentry, length, 0, NULL);
66040
66041@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66042 error = locks_verify_truncate(inode, f.file, length);
66043 if (!error)
66044 error = security_path_truncate(&f.file->f_path);
66045+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66046+ error = -EACCES;
66047 if (!error)
66048 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66049 sb_end_write(inode->i_sb);
66050@@ -380,6 +386,9 @@ retry:
66051 if (__mnt_is_readonly(path.mnt))
66052 res = -EROFS;
66053
66054+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66055+ res = -EACCES;
66056+
66057 out_path_release:
66058 path_put(&path);
66059 if (retry_estale(res, lookup_flags)) {
66060@@ -411,6 +420,8 @@ retry:
66061 if (error)
66062 goto dput_and_out;
66063
66064+ gr_log_chdir(path.dentry, path.mnt);
66065+
66066 set_fs_pwd(current->fs, &path);
66067
66068 dput_and_out:
66069@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66070 goto out_putf;
66071
66072 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66073+
66074+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66075+ error = -EPERM;
66076+
66077+ if (!error)
66078+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66079+
66080 if (!error)
66081 set_fs_pwd(current->fs, &f.file->f_path);
66082 out_putf:
66083@@ -469,7 +487,13 @@ retry:
66084 if (error)
66085 goto dput_and_out;
66086
66087+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66088+ goto dput_and_out;
66089+
66090 set_fs_root(current->fs, &path);
66091+
66092+ gr_handle_chroot_chdir(&path);
66093+
66094 error = 0;
66095 dput_and_out:
66096 path_put(&path);
66097@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66098 return error;
66099 retry_deleg:
66100 mutex_lock(&inode->i_mutex);
66101+
66102+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66103+ error = -EACCES;
66104+ goto out_unlock;
66105+ }
66106+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66107+ error = -EACCES;
66108+ goto out_unlock;
66109+ }
66110+
66111 error = security_path_chmod(path, mode);
66112 if (error)
66113 goto out_unlock;
66114@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66115 uid = make_kuid(current_user_ns(), user);
66116 gid = make_kgid(current_user_ns(), group);
66117
66118+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66119+ return -EACCES;
66120+
66121 newattrs.ia_valid = ATTR_CTIME;
66122 if (user != (uid_t) -1) {
66123 if (!uid_valid(uid))
66124@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66125 } else {
66126 fsnotify_open(f);
66127 fd_install(fd, f);
66128+ trace_do_sys_open(tmp->name, flags, mode);
66129 }
66130 }
66131 putname(tmp);
66132diff --git a/fs/pipe.c b/fs/pipe.c
66133index 21981e5..3d5f55c 100644
66134--- a/fs/pipe.c
66135+++ b/fs/pipe.c
66136@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66137
66138 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66139 {
66140- if (pipe->files)
66141+ if (atomic_read(&pipe->files))
66142 mutex_lock_nested(&pipe->mutex, subclass);
66143 }
66144
66145@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66146
66147 void pipe_unlock(struct pipe_inode_info *pipe)
66148 {
66149- if (pipe->files)
66150+ if (atomic_read(&pipe->files))
66151 mutex_unlock(&pipe->mutex);
66152 }
66153 EXPORT_SYMBOL(pipe_unlock);
66154@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66155 }
66156 if (bufs) /* More to do? */
66157 continue;
66158- if (!pipe->writers)
66159+ if (!atomic_read(&pipe->writers))
66160 break;
66161- if (!pipe->waiting_writers) {
66162+ if (!atomic_read(&pipe->waiting_writers)) {
66163 /* syscall merging: Usually we must not sleep
66164 * if O_NONBLOCK is set, or if we got some data.
66165 * But if a writer sleeps in kernel space, then
66166@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66167
66168 __pipe_lock(pipe);
66169
66170- if (!pipe->readers) {
66171+ if (!atomic_read(&pipe->readers)) {
66172 send_sig(SIGPIPE, current, 0);
66173 ret = -EPIPE;
66174 goto out;
66175@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66176 for (;;) {
66177 int bufs;
66178
66179- if (!pipe->readers) {
66180+ if (!atomic_read(&pipe->readers)) {
66181 send_sig(SIGPIPE, current, 0);
66182 if (!ret)
66183 ret = -EPIPE;
66184@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66185 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66186 do_wakeup = 0;
66187 }
66188- pipe->waiting_writers++;
66189+ atomic_inc(&pipe->waiting_writers);
66190 pipe_wait(pipe);
66191- pipe->waiting_writers--;
66192+ atomic_dec(&pipe->waiting_writers);
66193 }
66194 out:
66195 __pipe_unlock(pipe);
66196@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66197 mask = 0;
66198 if (filp->f_mode & FMODE_READ) {
66199 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66200- if (!pipe->writers && filp->f_version != pipe->w_counter)
66201+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66202 mask |= POLLHUP;
66203 }
66204
66205@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66206 * Most Unices do not set POLLERR for FIFOs but on Linux they
66207 * behave exactly like pipes for poll().
66208 */
66209- if (!pipe->readers)
66210+ if (!atomic_read(&pipe->readers))
66211 mask |= POLLERR;
66212 }
66213
66214@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66215 int kill = 0;
66216
66217 spin_lock(&inode->i_lock);
66218- if (!--pipe->files) {
66219+ if (atomic_dec_and_test(&pipe->files)) {
66220 inode->i_pipe = NULL;
66221 kill = 1;
66222 }
66223@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66224
66225 __pipe_lock(pipe);
66226 if (file->f_mode & FMODE_READ)
66227- pipe->readers--;
66228+ atomic_dec(&pipe->readers);
66229 if (file->f_mode & FMODE_WRITE)
66230- pipe->writers--;
66231+ atomic_dec(&pipe->writers);
66232
66233- if (pipe->readers || pipe->writers) {
66234+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66235 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66236 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66237 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66238@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66239 kfree(pipe);
66240 }
66241
66242-static struct vfsmount *pipe_mnt __read_mostly;
66243+struct vfsmount *pipe_mnt __read_mostly;
66244
66245 /*
66246 * pipefs_dname() is called from d_path().
66247@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66248 goto fail_iput;
66249
66250 inode->i_pipe = pipe;
66251- pipe->files = 2;
66252- pipe->readers = pipe->writers = 1;
66253+ atomic_set(&pipe->files, 2);
66254+ atomic_set(&pipe->readers, 1);
66255+ atomic_set(&pipe->writers, 1);
66256 inode->i_fop = &pipefifo_fops;
66257
66258 /*
66259@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66260 spin_lock(&inode->i_lock);
66261 if (inode->i_pipe) {
66262 pipe = inode->i_pipe;
66263- pipe->files++;
66264+ atomic_inc(&pipe->files);
66265 spin_unlock(&inode->i_lock);
66266 } else {
66267 spin_unlock(&inode->i_lock);
66268 pipe = alloc_pipe_info();
66269 if (!pipe)
66270 return -ENOMEM;
66271- pipe->files = 1;
66272+ atomic_set(&pipe->files, 1);
66273 spin_lock(&inode->i_lock);
66274 if (unlikely(inode->i_pipe)) {
66275- inode->i_pipe->files++;
66276+ atomic_inc(&inode->i_pipe->files);
66277 spin_unlock(&inode->i_lock);
66278 free_pipe_info(pipe);
66279 pipe = inode->i_pipe;
66280@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66281 * opened, even when there is no process writing the FIFO.
66282 */
66283 pipe->r_counter++;
66284- if (pipe->readers++ == 0)
66285+ if (atomic_inc_return(&pipe->readers) == 1)
66286 wake_up_partner(pipe);
66287
66288- if (!is_pipe && !pipe->writers) {
66289+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66290 if ((filp->f_flags & O_NONBLOCK)) {
66291 /* suppress POLLHUP until we have
66292 * seen a writer */
66293@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66294 * errno=ENXIO when there is no process reading the FIFO.
66295 */
66296 ret = -ENXIO;
66297- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66298+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66299 goto err;
66300
66301 pipe->w_counter++;
66302- if (!pipe->writers++)
66303+ if (atomic_inc_return(&pipe->writers) == 1)
66304 wake_up_partner(pipe);
66305
66306- if (!is_pipe && !pipe->readers) {
66307+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66308 if (wait_for_partner(pipe, &pipe->r_counter))
66309 goto err_wr;
66310 }
66311@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66312 * the process can at least talk to itself.
66313 */
66314
66315- pipe->readers++;
66316- pipe->writers++;
66317+ atomic_inc(&pipe->readers);
66318+ atomic_inc(&pipe->writers);
66319 pipe->r_counter++;
66320 pipe->w_counter++;
66321- if (pipe->readers == 1 || pipe->writers == 1)
66322+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66323 wake_up_partner(pipe);
66324 break;
66325
66326@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66327 return 0;
66328
66329 err_rd:
66330- if (!--pipe->readers)
66331+ if (atomic_dec_and_test(&pipe->readers))
66332 wake_up_interruptible(&pipe->wait);
66333 ret = -ERESTARTSYS;
66334 goto err;
66335
66336 err_wr:
66337- if (!--pipe->writers)
66338+ if (atomic_dec_and_test(&pipe->writers))
66339 wake_up_interruptible(&pipe->wait);
66340 ret = -ERESTARTSYS;
66341 goto err;
66342diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66343index 0855f77..6787d50 100644
66344--- a/fs/posix_acl.c
66345+++ b/fs/posix_acl.c
66346@@ -20,6 +20,7 @@
66347 #include <linux/xattr.h>
66348 #include <linux/export.h>
66349 #include <linux/user_namespace.h>
66350+#include <linux/grsecurity.h>
66351
66352 struct posix_acl **acl_by_type(struct inode *inode, int type)
66353 {
66354@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66355 }
66356 }
66357 if (mode_p)
66358- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66359+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66360 return not_equiv;
66361 }
66362 EXPORT_SYMBOL(posix_acl_equiv_mode);
66363@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66364 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66365 }
66366
66367- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66368+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66369 return not_equiv;
66370 }
66371
66372@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66373 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66374 int err = -ENOMEM;
66375 if (clone) {
66376+ *mode_p &= ~gr_acl_umask();
66377+
66378 err = posix_acl_create_masq(clone, mode_p);
66379 if (err < 0) {
66380 posix_acl_release(clone);
66381@@ -659,11 +662,12 @@ struct posix_acl *
66382 posix_acl_from_xattr(struct user_namespace *user_ns,
66383 const void *value, size_t size)
66384 {
66385- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66386- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66387+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66388+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66389 int count;
66390 struct posix_acl *acl;
66391 struct posix_acl_entry *acl_e;
66392+ umode_t umask = gr_acl_umask();
66393
66394 if (!value)
66395 return NULL;
66396@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66397
66398 switch(acl_e->e_tag) {
66399 case ACL_USER_OBJ:
66400+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66401+ break;
66402 case ACL_GROUP_OBJ:
66403 case ACL_MASK:
66404+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66405+ break;
66406 case ACL_OTHER:
66407+ acl_e->e_perm &= ~(umask & S_IRWXO);
66408 break;
66409
66410 case ACL_USER:
66411+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66412 acl_e->e_uid =
66413 make_kuid(user_ns,
66414 le32_to_cpu(entry->e_id));
66415@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66416 goto fail;
66417 break;
66418 case ACL_GROUP:
66419+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66420 acl_e->e_gid =
66421 make_kgid(user_ns,
66422 le32_to_cpu(entry->e_id));
66423diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66424index 2183fcf..3c32a98 100644
66425--- a/fs/proc/Kconfig
66426+++ b/fs/proc/Kconfig
66427@@ -30,7 +30,7 @@ config PROC_FS
66428
66429 config PROC_KCORE
66430 bool "/proc/kcore support" if !ARM
66431- depends on PROC_FS && MMU
66432+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66433 help
66434 Provides a virtual ELF core file of the live kernel. This can
66435 be read with gdb and other ELF tools. No modifications can be
66436@@ -38,8 +38,8 @@ config PROC_KCORE
66437
66438 config PROC_VMCORE
66439 bool "/proc/vmcore support"
66440- depends on PROC_FS && CRASH_DUMP
66441- default y
66442+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66443+ default n
66444 help
66445 Exports the dump image of crashed kernel in ELF format.
66446
66447@@ -63,8 +63,8 @@ config PROC_SYSCTL
66448 limited in memory.
66449
66450 config PROC_PAGE_MONITOR
66451- default y
66452- depends on PROC_FS && MMU
66453+ default n
66454+ depends on PROC_FS && MMU && !GRKERNSEC
66455 bool "Enable /proc page monitoring" if EXPERT
66456 help
66457 Various /proc files exist to monitor process memory utilization:
66458diff --git a/fs/proc/array.c b/fs/proc/array.c
66459index cd3653e..9b9b79a 100644
66460--- a/fs/proc/array.c
66461+++ b/fs/proc/array.c
66462@@ -60,6 +60,7 @@
66463 #include <linux/tty.h>
66464 #include <linux/string.h>
66465 #include <linux/mman.h>
66466+#include <linux/grsecurity.h>
66467 #include <linux/proc_fs.h>
66468 #include <linux/ioport.h>
66469 #include <linux/uaccess.h>
66470@@ -347,6 +348,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66471 seq_putc(m, '\n');
66472 }
66473
66474+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66475+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66476+{
66477+ if (p->mm)
66478+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66479+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66480+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66481+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66482+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66483+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66484+ else
66485+ seq_printf(m, "PaX:\t-----\n");
66486+}
66487+#endif
66488+
66489 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66490 struct pid *pid, struct task_struct *task)
66491 {
66492@@ -365,9 +381,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66493 task_cpus_allowed(m, task);
66494 cpuset_task_status_allowed(m, task);
66495 task_context_switch_counts(m, task);
66496+
66497+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66498+ task_pax(m, task);
66499+#endif
66500+
66501+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66502+ task_grsec_rbac(m, task);
66503+#endif
66504+
66505 return 0;
66506 }
66507
66508+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66509+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66510+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66511+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66512+#endif
66513+
66514 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66515 struct pid *pid, struct task_struct *task, int whole)
66516 {
66517@@ -389,6 +420,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66518 char tcomm[sizeof(task->comm)];
66519 unsigned long flags;
66520
66521+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66522+ if (current->exec_id != m->exec_id) {
66523+ gr_log_badprocpid("stat");
66524+ return 0;
66525+ }
66526+#endif
66527+
66528 state = *get_task_state(task);
66529 vsize = eip = esp = 0;
66530 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66531@@ -459,6 +497,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66532 gtime = task_gtime(task);
66533 }
66534
66535+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66536+ if (PAX_RAND_FLAGS(mm)) {
66537+ eip = 0;
66538+ esp = 0;
66539+ wchan = 0;
66540+ }
66541+#endif
66542+#ifdef CONFIG_GRKERNSEC_HIDESYM
66543+ wchan = 0;
66544+ eip =0;
66545+ esp =0;
66546+#endif
66547+
66548 /* scale priority and nice values from timeslices to -20..20 */
66549 /* to make it look like a "normal" Unix priority/nice value */
66550 priority = task_prio(task);
66551@@ -490,9 +541,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66552 seq_put_decimal_ull(m, ' ', vsize);
66553 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66554 seq_put_decimal_ull(m, ' ', rsslim);
66555+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66556+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66557+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66558+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66559+#else
66560 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66561 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66562 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66563+#endif
66564 seq_put_decimal_ull(m, ' ', esp);
66565 seq_put_decimal_ull(m, ' ', eip);
66566 /* The signal information here is obsolete.
66567@@ -514,7 +571,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66568 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66569 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66570
66571- if (mm && permitted) {
66572+ if (mm && permitted
66573+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66574+ && !PAX_RAND_FLAGS(mm)
66575+#endif
66576+ ) {
66577 seq_put_decimal_ull(m, ' ', mm->start_data);
66578 seq_put_decimal_ull(m, ' ', mm->end_data);
66579 seq_put_decimal_ull(m, ' ', mm->start_brk);
66580@@ -552,8 +613,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66581 struct pid *pid, struct task_struct *task)
66582 {
66583 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66584- struct mm_struct *mm = get_task_mm(task);
66585+ struct mm_struct *mm;
66586
66587+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66588+ if (current->exec_id != m->exec_id) {
66589+ gr_log_badprocpid("statm");
66590+ return 0;
66591+ }
66592+#endif
66593+ mm = get_task_mm(task);
66594 if (mm) {
66595 size = task_statm(mm, &shared, &text, &data, &resident);
66596 mmput(mm);
66597@@ -576,6 +644,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66598 return 0;
66599 }
66600
66601+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66602+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
66603+{
66604+ unsigned long flags;
66605+ u32 curr_ip = 0;
66606+
66607+ if (lock_task_sighand(task, &flags)) {
66608+ curr_ip = task->signal->curr_ip;
66609+ unlock_task_sighand(task, &flags);
66610+ }
66611+ return seq_printf(m, "%pI4\n", &curr_ip);
66612+}
66613+#endif
66614+
66615 #ifdef CONFIG_CHECKPOINT_RESTORE
66616 static struct pid *
66617 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
66618diff --git a/fs/proc/base.c b/fs/proc/base.c
66619index baf852b..03fe930 100644
66620--- a/fs/proc/base.c
66621+++ b/fs/proc/base.c
66622@@ -113,6 +113,14 @@ struct pid_entry {
66623 union proc_op op;
66624 };
66625
66626+struct getdents_callback {
66627+ struct linux_dirent __user * current_dir;
66628+ struct linux_dirent __user * previous;
66629+ struct file * file;
66630+ int count;
66631+ int error;
66632+};
66633+
66634 #define NOD(NAME, MODE, IOP, FOP, OP) { \
66635 .name = (NAME), \
66636 .len = sizeof(NAME) - 1, \
66637@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
66638 return 0;
66639 }
66640
66641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66642+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66643+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66644+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66645+#endif
66646+
66647 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66648 struct pid *pid, struct task_struct *task)
66649 {
66650 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66651 if (mm && !IS_ERR(mm)) {
66652 unsigned int nwords = 0;
66653+
66654+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66655+ /* allow if we're currently ptracing this task */
66656+ if (PAX_RAND_FLAGS(mm) &&
66657+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
66658+ mmput(mm);
66659+ return 0;
66660+ }
66661+#endif
66662+
66663 do {
66664 nwords += 2;
66665 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
66666@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66667 }
66668
66669
66670-#ifdef CONFIG_KALLSYMS
66671+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66672 /*
66673 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
66674 * Returns the resolved symbol. If that fails, simply return the address.
66675@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
66676 mutex_unlock(&task->signal->cred_guard_mutex);
66677 }
66678
66679-#ifdef CONFIG_STACKTRACE
66680+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66681
66682 #define MAX_STACK_TRACE_DEPTH 64
66683
66684@@ -487,7 +511,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
66685 return 0;
66686 }
66687
66688-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66689+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66690 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66691 struct pid *pid, struct task_struct *task)
66692 {
66693@@ -517,7 +541,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66694 /************************************************************************/
66695
66696 /* permission checks */
66697-static int proc_fd_access_allowed(struct inode *inode)
66698+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66699 {
66700 struct task_struct *task;
66701 int allowed = 0;
66702@@ -527,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66703 */
66704 task = get_proc_task(inode);
66705 if (task) {
66706- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66707+ if (log)
66708+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66709+ else
66710+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66711 put_task_struct(task);
66712 }
66713 return allowed;
66714@@ -558,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66715 struct task_struct *task,
66716 int hide_pid_min)
66717 {
66718+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66719+ return false;
66720+
66721+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66722+ rcu_read_lock();
66723+ {
66724+ const struct cred *tmpcred = current_cred();
66725+ const struct cred *cred = __task_cred(task);
66726+
66727+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66728+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66729+ || in_group_p(grsec_proc_gid)
66730+#endif
66731+ ) {
66732+ rcu_read_unlock();
66733+ return true;
66734+ }
66735+ }
66736+ rcu_read_unlock();
66737+
66738+ if (!pid->hide_pid)
66739+ return false;
66740+#endif
66741+
66742 if (pid->hide_pid < hide_pid_min)
66743 return true;
66744 if (in_group_p(pid->pid_gid))
66745 return true;
66746+
66747 return ptrace_may_access(task, PTRACE_MODE_READ);
66748 }
66749
66750@@ -579,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66751 put_task_struct(task);
66752
66753 if (!has_perms) {
66754+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66755+ {
66756+#else
66757 if (pid->hide_pid == 2) {
66758+#endif
66759 /*
66760 * Let's make getdents(), stat(), and open()
66761 * consistent with each other. If a process
66762@@ -640,6 +696,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66763 if (!task)
66764 return -ESRCH;
66765
66766+ if (gr_acl_handle_procpidmem(task)) {
66767+ put_task_struct(task);
66768+ return -EPERM;
66769+ }
66770+
66771 mm = mm_access(task, mode);
66772 put_task_struct(task);
66773
66774@@ -655,6 +716,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66775
66776 file->private_data = mm;
66777
66778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66779+ file->f_version = current->exec_id;
66780+#endif
66781+
66782 return 0;
66783 }
66784
66785@@ -676,6 +741,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66786 ssize_t copied;
66787 char *page;
66788
66789+#ifdef CONFIG_GRKERNSEC
66790+ if (write)
66791+ return -EPERM;
66792+#endif
66793+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66794+ if (file->f_version != current->exec_id) {
66795+ gr_log_badprocpid("mem");
66796+ return 0;
66797+ }
66798+#endif
66799+
66800 if (!mm)
66801 return 0;
66802
66803@@ -688,7 +764,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66804 goto free;
66805
66806 while (count > 0) {
66807- int this_len = min_t(int, count, PAGE_SIZE);
66808+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66809
66810 if (write && copy_from_user(page, buf, this_len)) {
66811 copied = -EFAULT;
66812@@ -780,6 +856,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66813 if (!mm)
66814 return 0;
66815
66816+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66817+ if (file->f_version != current->exec_id) {
66818+ gr_log_badprocpid("environ");
66819+ return 0;
66820+ }
66821+#endif
66822+
66823 page = (char *)__get_free_page(GFP_TEMPORARY);
66824 if (!page)
66825 return -ENOMEM;
66826@@ -789,7 +872,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66827 goto free;
66828 while (count > 0) {
66829 size_t this_len, max_len;
66830- int retval;
66831+ ssize_t retval;
66832
66833 if (src >= (mm->env_end - mm->env_start))
66834 break;
66835@@ -1403,7 +1486,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66836 int error = -EACCES;
66837
66838 /* Are we allowed to snoop on the tasks file descriptors? */
66839- if (!proc_fd_access_allowed(inode))
66840+ if (!proc_fd_access_allowed(inode, 0))
66841 goto out;
66842
66843 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66844@@ -1447,8 +1530,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66845 struct path path;
66846
66847 /* Are we allowed to snoop on the tasks file descriptors? */
66848- if (!proc_fd_access_allowed(inode))
66849- goto out;
66850+ /* logging this is needed for learning on chromium to work properly,
66851+ but we don't want to flood the logs from 'ps' which does a readlink
66852+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
66853+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
66854+ */
66855+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66856+ if (!proc_fd_access_allowed(inode,0))
66857+ goto out;
66858+ } else {
66859+ if (!proc_fd_access_allowed(inode,1))
66860+ goto out;
66861+ }
66862
66863 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66864 if (error)
66865@@ -1498,7 +1591,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66866 rcu_read_lock();
66867 cred = __task_cred(task);
66868 inode->i_uid = cred->euid;
66869+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66870+ inode->i_gid = grsec_proc_gid;
66871+#else
66872 inode->i_gid = cred->egid;
66873+#endif
66874 rcu_read_unlock();
66875 }
66876 security_task_to_inode(task, inode);
66877@@ -1534,10 +1631,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66878 return -ENOENT;
66879 }
66880 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66881+#ifdef CONFIG_GRKERNSEC_PROC_USER
66882+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66883+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66884+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66885+#endif
66886 task_dumpable(task)) {
66887 cred = __task_cred(task);
66888 stat->uid = cred->euid;
66889+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66890+ stat->gid = grsec_proc_gid;
66891+#else
66892 stat->gid = cred->egid;
66893+#endif
66894 }
66895 }
66896 rcu_read_unlock();
66897@@ -1575,11 +1681,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66898
66899 if (task) {
66900 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66901+#ifdef CONFIG_GRKERNSEC_PROC_USER
66902+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66903+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66904+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66905+#endif
66906 task_dumpable(task)) {
66907 rcu_read_lock();
66908 cred = __task_cred(task);
66909 inode->i_uid = cred->euid;
66910+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66911+ inode->i_gid = grsec_proc_gid;
66912+#else
66913 inode->i_gid = cred->egid;
66914+#endif
66915 rcu_read_unlock();
66916 } else {
66917 inode->i_uid = GLOBAL_ROOT_UID;
66918@@ -2114,6 +2229,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66919 if (!task)
66920 goto out_no_task;
66921
66922+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66923+ goto out;
66924+
66925 /*
66926 * Yes, it does not scale. And it should not. Don't add
66927 * new entries into /proc/<tgid>/ without very good reasons.
66928@@ -2144,6 +2262,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66929 if (!task)
66930 return -ENOENT;
66931
66932+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66933+ goto out;
66934+
66935 if (!dir_emit_dots(file, ctx))
66936 goto out;
66937
66938@@ -2535,7 +2656,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66939 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66940 #endif
66941 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66942-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66943+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66944 ONE("syscall", S_IRUSR, proc_pid_syscall),
66945 #endif
66946 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66947@@ -2560,10 +2681,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66948 #ifdef CONFIG_SECURITY
66949 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66950 #endif
66951-#ifdef CONFIG_KALLSYMS
66952+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66953 ONE("wchan", S_IRUGO, proc_pid_wchan),
66954 #endif
66955-#ifdef CONFIG_STACKTRACE
66956+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66957 ONE("stack", S_IRUSR, proc_pid_stack),
66958 #endif
66959 #ifdef CONFIG_SCHEDSTATS
66960@@ -2597,6 +2718,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66961 #ifdef CONFIG_HARDWALL
66962 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66963 #endif
66964+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66965+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66966+#endif
66967 #ifdef CONFIG_USER_NS
66968 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66969 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66970@@ -2727,7 +2851,14 @@ static int proc_pid_instantiate(struct inode *dir,
66971 if (!inode)
66972 goto out;
66973
66974+#ifdef CONFIG_GRKERNSEC_PROC_USER
66975+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66976+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66977+ inode->i_gid = grsec_proc_gid;
66978+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66979+#else
66980 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66981+#endif
66982 inode->i_op = &proc_tgid_base_inode_operations;
66983 inode->i_fop = &proc_tgid_base_operations;
66984 inode->i_flags|=S_IMMUTABLE;
66985@@ -2765,7 +2896,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66986 if (!task)
66987 goto out;
66988
66989+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66990+ goto out_put_task;
66991+
66992 result = proc_pid_instantiate(dir, dentry, task, NULL);
66993+out_put_task:
66994 put_task_struct(task);
66995 out:
66996 return ERR_PTR(result);
66997@@ -2879,7 +3014,7 @@ static const struct pid_entry tid_base_stuff[] = {
66998 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66999 #endif
67000 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67001-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67002+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67003 ONE("syscall", S_IRUSR, proc_pid_syscall),
67004 #endif
67005 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67006@@ -2906,10 +3041,10 @@ static const struct pid_entry tid_base_stuff[] = {
67007 #ifdef CONFIG_SECURITY
67008 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67009 #endif
67010-#ifdef CONFIG_KALLSYMS
67011+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67012 ONE("wchan", S_IRUGO, proc_pid_wchan),
67013 #endif
67014-#ifdef CONFIG_STACKTRACE
67015+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67016 ONE("stack", S_IRUSR, proc_pid_stack),
67017 #endif
67018 #ifdef CONFIG_SCHEDSTATS
67019diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67020index cbd82df..c0407d2 100644
67021--- a/fs/proc/cmdline.c
67022+++ b/fs/proc/cmdline.c
67023@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67024
67025 static int __init proc_cmdline_init(void)
67026 {
67027+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67028+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67029+#else
67030 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67031+#endif
67032 return 0;
67033 }
67034 fs_initcall(proc_cmdline_init);
67035diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67036index 50493ed..248166b 100644
67037--- a/fs/proc/devices.c
67038+++ b/fs/proc/devices.c
67039@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67040
67041 static int __init proc_devices_init(void)
67042 {
67043+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67044+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67045+#else
67046 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67047+#endif
67048 return 0;
67049 }
67050 fs_initcall(proc_devices_init);
67051diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67052index 955bb55..71948bd 100644
67053--- a/fs/proc/fd.c
67054+++ b/fs/proc/fd.c
67055@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67056 if (!task)
67057 return -ENOENT;
67058
67059- files = get_files_struct(task);
67060+ if (!gr_acl_handle_procpidmem(task))
67061+ files = get_files_struct(task);
67062 put_task_struct(task);
67063
67064 if (files) {
67065@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67066 */
67067 int proc_fd_permission(struct inode *inode, int mask)
67068 {
67069+ struct task_struct *task;
67070 int rv = generic_permission(inode, mask);
67071- if (rv == 0)
67072- return 0;
67073+
67074 if (task_tgid(current) == proc_pid(inode))
67075 rv = 0;
67076+
67077+ task = get_proc_task(inode);
67078+ if (task == NULL)
67079+ return rv;
67080+
67081+ if (gr_acl_handle_procpidmem(task))
67082+ rv = -EACCES;
67083+
67084+ put_task_struct(task);
67085+
67086 return rv;
67087 }
67088
67089diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67090index 317b726..e329aed 100644
67091--- a/fs/proc/generic.c
67092+++ b/fs/proc/generic.c
67093@@ -23,6 +23,7 @@
67094 #include <linux/bitops.h>
67095 #include <linux/spinlock.h>
67096 #include <linux/completion.h>
67097+#include <linux/grsecurity.h>
67098 #include <asm/uaccess.h>
67099
67100 #include "internal.h"
67101@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67102 return proc_lookup_de(PDE(dir), dir, dentry);
67103 }
67104
67105+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67106+ unsigned int flags)
67107+{
67108+ if (gr_proc_is_restricted())
67109+ return ERR_PTR(-EACCES);
67110+
67111+ return proc_lookup_de(PDE(dir), dir, dentry);
67112+}
67113+
67114 /*
67115 * This returns non-zero if at EOF, so that the /proc
67116 * root directory can use this and check if it should
67117@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67118 return proc_readdir_de(PDE(inode), file, ctx);
67119 }
67120
67121+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67122+{
67123+ struct inode *inode = file_inode(file);
67124+
67125+ if (gr_proc_is_restricted())
67126+ return -EACCES;
67127+
67128+ return proc_readdir_de(PDE(inode), file, ctx);
67129+}
67130+
67131 /*
67132 * These are the generic /proc directory operations. They
67133 * use the in-memory "struct proc_dir_entry" tree to parse
67134@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67135 .iterate = proc_readdir,
67136 };
67137
67138+static const struct file_operations proc_dir_restricted_operations = {
67139+ .llseek = generic_file_llseek,
67140+ .read = generic_read_dir,
67141+ .iterate = proc_readdir_restrict,
67142+};
67143+
67144 /*
67145 * proc directories can do almost nothing..
67146 */
67147@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67148 .setattr = proc_notify_change,
67149 };
67150
67151+static const struct inode_operations proc_dir_restricted_inode_operations = {
67152+ .lookup = proc_lookup_restrict,
67153+ .getattr = proc_getattr,
67154+ .setattr = proc_notify_change,
67155+};
67156+
67157 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67158 {
67159 struct proc_dir_entry *tmp;
67160@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67161 return ret;
67162
67163 if (S_ISDIR(dp->mode)) {
67164- dp->proc_fops = &proc_dir_operations;
67165- dp->proc_iops = &proc_dir_inode_operations;
67166+ if (dp->restricted) {
67167+ dp->proc_fops = &proc_dir_restricted_operations;
67168+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67169+ } else {
67170+ dp->proc_fops = &proc_dir_operations;
67171+ dp->proc_iops = &proc_dir_inode_operations;
67172+ }
67173 dir->nlink++;
67174 } else if (S_ISLNK(dp->mode)) {
67175 dp->proc_iops = &proc_link_inode_operations;
67176@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67177 }
67178 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67179
67180+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67181+ struct proc_dir_entry *parent, void *data)
67182+{
67183+ struct proc_dir_entry *ent;
67184+
67185+ if (mode == 0)
67186+ mode = S_IRUGO | S_IXUGO;
67187+
67188+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67189+ if (ent) {
67190+ ent->data = data;
67191+ ent->restricted = 1;
67192+ if (proc_register(parent, ent) < 0) {
67193+ kfree(ent);
67194+ ent = NULL;
67195+ }
67196+ }
67197+ return ent;
67198+}
67199+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67200+
67201 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67202 struct proc_dir_entry *parent)
67203 {
67204@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67205 }
67206 EXPORT_SYMBOL(proc_mkdir);
67207
67208+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67209+ struct proc_dir_entry *parent)
67210+{
67211+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67212+}
67213+EXPORT_SYMBOL(proc_mkdir_restrict);
67214+
67215 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67216 struct proc_dir_entry *parent,
67217 const struct file_operations *proc_fops,
67218diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67219index 333080d..0a35ec4 100644
67220--- a/fs/proc/inode.c
67221+++ b/fs/proc/inode.c
67222@@ -23,11 +23,17 @@
67223 #include <linux/slab.h>
67224 #include <linux/mount.h>
67225 #include <linux/magic.h>
67226+#include <linux/grsecurity.h>
67227
67228 #include <asm/uaccess.h>
67229
67230 #include "internal.h"
67231
67232+#ifdef CONFIG_PROC_SYSCTL
67233+extern const struct inode_operations proc_sys_inode_operations;
67234+extern const struct inode_operations proc_sys_dir_operations;
67235+#endif
67236+
67237 static void proc_evict_inode(struct inode *inode)
67238 {
67239 struct proc_dir_entry *de;
67240@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67241 ns = PROC_I(inode)->ns.ns;
67242 if (ns_ops && ns)
67243 ns_ops->put(ns);
67244+
67245+#ifdef CONFIG_PROC_SYSCTL
67246+ if (inode->i_op == &proc_sys_inode_operations ||
67247+ inode->i_op == &proc_sys_dir_operations)
67248+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67249+#endif
67250+
67251 }
67252
67253 static struct kmem_cache * proc_inode_cachep;
67254@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67255 if (de->mode) {
67256 inode->i_mode = de->mode;
67257 inode->i_uid = de->uid;
67258+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67259+ inode->i_gid = grsec_proc_gid;
67260+#else
67261 inode->i_gid = de->gid;
67262+#endif
67263 }
67264 if (de->size)
67265 inode->i_size = de->size;
67266diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67267index 7da13e4..68d0981 100644
67268--- a/fs/proc/internal.h
67269+++ b/fs/proc/internal.h
67270@@ -46,9 +46,10 @@ struct proc_dir_entry {
67271 struct completion *pde_unload_completion;
67272 struct list_head pde_openers; /* who did ->open, but not ->release */
67273 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67274+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67275 u8 namelen;
67276 char name[];
67277-};
67278+} __randomize_layout;
67279
67280 union proc_op {
67281 int (*proc_get_link)(struct dentry *, struct path *);
67282@@ -66,7 +67,7 @@ struct proc_inode {
67283 struct ctl_table *sysctl_entry;
67284 struct proc_ns ns;
67285 struct inode vfs_inode;
67286-};
67287+} __randomize_layout;
67288
67289 /*
67290 * General functions
67291@@ -154,6 +155,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67292 struct pid *, struct task_struct *);
67293 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67294 struct pid *, struct task_struct *);
67295+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67296+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
67297+ struct pid *, struct task_struct *);
67298+#endif
67299
67300 /*
67301 * base.c
67302@@ -178,9 +183,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67303 * generic.c
67304 */
67305 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67306+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67307 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67308 struct dentry *);
67309 extern int proc_readdir(struct file *, struct dir_context *);
67310+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67311 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67312
67313 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67314diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67315index a352d57..cb94a5c 100644
67316--- a/fs/proc/interrupts.c
67317+++ b/fs/proc/interrupts.c
67318@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67319
67320 static int __init proc_interrupts_init(void)
67321 {
67322+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67323+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67324+#else
67325 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67326+#endif
67327 return 0;
67328 }
67329 fs_initcall(proc_interrupts_init);
67330diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67331index 6df8d07..3321060 100644
67332--- a/fs/proc/kcore.c
67333+++ b/fs/proc/kcore.c
67334@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67335 * the addresses in the elf_phdr on our list.
67336 */
67337 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67338- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67339+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67340+ if (tsz > buflen)
67341 tsz = buflen;
67342-
67343+
67344 while (buflen) {
67345 struct kcore_list *m;
67346
67347@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67348 kfree(elf_buf);
67349 } else {
67350 if (kern_addr_valid(start)) {
67351- unsigned long n;
67352+ char *elf_buf;
67353+ mm_segment_t oldfs;
67354
67355- n = copy_to_user(buffer, (char *)start, tsz);
67356- /*
67357- * We cannot distinguish between fault on source
67358- * and fault on destination. When this happens
67359- * we clear too and hope it will trigger the
67360- * EFAULT again.
67361- */
67362- if (n) {
67363- if (clear_user(buffer + tsz - n,
67364- n))
67365+ elf_buf = kmalloc(tsz, GFP_KERNEL);
67366+ if (!elf_buf)
67367+ return -ENOMEM;
67368+ oldfs = get_fs();
67369+ set_fs(KERNEL_DS);
67370+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
67371+ set_fs(oldfs);
67372+ if (copy_to_user(buffer, elf_buf, tsz)) {
67373+ kfree(elf_buf);
67374 return -EFAULT;
67375+ }
67376 }
67377+ set_fs(oldfs);
67378+ kfree(elf_buf);
67379 } else {
67380 if (clear_user(buffer, tsz))
67381 return -EFAULT;
67382@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67383
67384 static int open_kcore(struct inode *inode, struct file *filp)
67385 {
67386+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67387+ return -EPERM;
67388+#endif
67389 if (!capable(CAP_SYS_RAWIO))
67390 return -EPERM;
67391 if (kcore_need_update)
67392diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67393index aa1eee0..03dda72 100644
67394--- a/fs/proc/meminfo.c
67395+++ b/fs/proc/meminfo.c
67396@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67397 vmi.used >> 10,
67398 vmi.largest_chunk >> 10
67399 #ifdef CONFIG_MEMORY_FAILURE
67400- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67401+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67402 #endif
67403 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67404 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67405diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67406index d4a3574..b421ce9 100644
67407--- a/fs/proc/nommu.c
67408+++ b/fs/proc/nommu.c
67409@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67410
67411 if (file) {
67412 seq_pad(m, ' ');
67413- seq_path(m, &file->f_path, "");
67414+ seq_path(m, &file->f_path, "\n\\");
67415 }
67416
67417 seq_putc(m, '\n');
67418diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67419index a63af3e..b4f262a 100644
67420--- a/fs/proc/proc_net.c
67421+++ b/fs/proc/proc_net.c
67422@@ -23,9 +23,27 @@
67423 #include <linux/nsproxy.h>
67424 #include <net/net_namespace.h>
67425 #include <linux/seq_file.h>
67426+#include <linux/grsecurity.h>
67427
67428 #include "internal.h"
67429
67430+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67431+static struct seq_operations *ipv6_seq_ops_addr;
67432+
67433+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67434+{
67435+ ipv6_seq_ops_addr = addr;
67436+}
67437+
67438+void unregister_ipv6_seq_ops_addr(void)
67439+{
67440+ ipv6_seq_ops_addr = NULL;
67441+}
67442+
67443+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67444+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67445+#endif
67446+
67447 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67448 {
67449 return pde->parent->data;
67450@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67451 return maybe_get_net(PDE_NET(PDE(inode)));
67452 }
67453
67454+extern const struct seq_operations dev_seq_ops;
67455+
67456 int seq_open_net(struct inode *ino, struct file *f,
67457 const struct seq_operations *ops, int size)
67458 {
67459@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67460
67461 BUG_ON(size < sizeof(*p));
67462
67463+ /* only permit access to /proc/net/dev */
67464+ if (
67465+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67466+ ops != ipv6_seq_ops_addr &&
67467+#endif
67468+ ops != &dev_seq_ops && gr_proc_is_restricted())
67469+ return -EACCES;
67470+
67471 net = get_proc_net(ino);
67472 if (net == NULL)
67473 return -ENXIO;
67474@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67475 int err;
67476 struct net *net;
67477
67478+ if (gr_proc_is_restricted())
67479+ return -EACCES;
67480+
67481 err = -ENXIO;
67482 net = get_proc_net(inode);
67483 if (net == NULL)
67484diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67485index f92d5dd..26398ac 100644
67486--- a/fs/proc/proc_sysctl.c
67487+++ b/fs/proc/proc_sysctl.c
67488@@ -11,13 +11,21 @@
67489 #include <linux/namei.h>
67490 #include <linux/mm.h>
67491 #include <linux/module.h>
67492+#include <linux/nsproxy.h>
67493+#ifdef CONFIG_GRKERNSEC
67494+#include <net/net_namespace.h>
67495+#endif
67496 #include "internal.h"
67497
67498+extern int gr_handle_chroot_sysctl(const int op);
67499+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67500+ const int op);
67501+
67502 static const struct dentry_operations proc_sys_dentry_operations;
67503 static const struct file_operations proc_sys_file_operations;
67504-static const struct inode_operations proc_sys_inode_operations;
67505+const struct inode_operations proc_sys_inode_operations;
67506 static const struct file_operations proc_sys_dir_file_operations;
67507-static const struct inode_operations proc_sys_dir_operations;
67508+const struct inode_operations proc_sys_dir_operations;
67509
67510 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67511 {
67512@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67513
67514 err = NULL;
67515 d_set_d_op(dentry, &proc_sys_dentry_operations);
67516+
67517+ gr_handle_proc_create(dentry, inode);
67518+
67519 d_add(dentry, inode);
67520
67521 out:
67522@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67523 struct inode *inode = file_inode(filp);
67524 struct ctl_table_header *head = grab_header(inode);
67525 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67526+ int op = write ? MAY_WRITE : MAY_READ;
67527 ssize_t error;
67528 size_t res;
67529
67530@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67531 * and won't be until we finish.
67532 */
67533 error = -EPERM;
67534- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67535+ if (sysctl_perm(head, table, op))
67536 goto out;
67537
67538 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67539@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67540 if (!table->proc_handler)
67541 goto out;
67542
67543+#ifdef CONFIG_GRKERNSEC
67544+ error = -EPERM;
67545+ if (gr_handle_chroot_sysctl(op))
67546+ goto out;
67547+ dget(filp->f_path.dentry);
67548+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67549+ dput(filp->f_path.dentry);
67550+ goto out;
67551+ }
67552+ dput(filp->f_path.dentry);
67553+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67554+ goto out;
67555+ if (write) {
67556+ if (current->nsproxy->net_ns != table->extra2) {
67557+ if (!capable(CAP_SYS_ADMIN))
67558+ goto out;
67559+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67560+ goto out;
67561+ }
67562+#endif
67563+
67564 /* careful: calling conventions are nasty here */
67565 res = count;
67566 error = table->proc_handler(table, write, buf, &res, ppos);
67567@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67568 return false;
67569 } else {
67570 d_set_d_op(child, &proc_sys_dentry_operations);
67571+
67572+ gr_handle_proc_create(child, inode);
67573+
67574 d_add(child, inode);
67575 }
67576 } else {
67577@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
67578 if ((*pos)++ < ctx->pos)
67579 return true;
67580
67581+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67582+ return 0;
67583+
67584 if (unlikely(S_ISLNK(table->mode)))
67585 res = proc_sys_link_fill_cache(file, ctx, head, table);
67586 else
67587@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67588 if (IS_ERR(head))
67589 return PTR_ERR(head);
67590
67591+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67592+ return -ENOENT;
67593+
67594 generic_fillattr(inode, stat);
67595 if (table)
67596 stat->mode = (stat->mode & S_IFMT) | table->mode;
67597@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67598 .llseek = generic_file_llseek,
67599 };
67600
67601-static const struct inode_operations proc_sys_inode_operations = {
67602+const struct inode_operations proc_sys_inode_operations = {
67603 .permission = proc_sys_permission,
67604 .setattr = proc_sys_setattr,
67605 .getattr = proc_sys_getattr,
67606 };
67607
67608-static const struct inode_operations proc_sys_dir_operations = {
67609+const struct inode_operations proc_sys_dir_operations = {
67610 .lookup = proc_sys_lookup,
67611 .permission = proc_sys_permission,
67612 .setattr = proc_sys_setattr,
67613@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67614 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67615 const char *name, int namelen)
67616 {
67617- struct ctl_table *table;
67618+ ctl_table_no_const *table;
67619 struct ctl_dir *new;
67620 struct ctl_node *node;
67621 char *new_name;
67622@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67623 return NULL;
67624
67625 node = (struct ctl_node *)(new + 1);
67626- table = (struct ctl_table *)(node + 1);
67627+ table = (ctl_table_no_const *)(node + 1);
67628 new_name = (char *)(table + 2);
67629 memcpy(new_name, name, namelen);
67630 new_name[namelen] = '\0';
67631@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
67632 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
67633 struct ctl_table_root *link_root)
67634 {
67635- struct ctl_table *link_table, *entry, *link;
67636+ ctl_table_no_const *link_table, *link;
67637+ struct ctl_table *entry;
67638 struct ctl_table_header *links;
67639 struct ctl_node *node;
67640 char *link_name;
67641@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
67642 return NULL;
67643
67644 node = (struct ctl_node *)(links + 1);
67645- link_table = (struct ctl_table *)(node + nr_entries);
67646+ link_table = (ctl_table_no_const *)(node + nr_entries);
67647 link_name = (char *)&link_table[nr_entries + 1];
67648
67649 for (link = link_table, entry = table; entry->procname; link++, entry++) {
67650@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67651 struct ctl_table_header ***subheader, struct ctl_table_set *set,
67652 struct ctl_table *table)
67653 {
67654- struct ctl_table *ctl_table_arg = NULL;
67655- struct ctl_table *entry, *files;
67656+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
67657+ struct ctl_table *entry;
67658 int nr_files = 0;
67659 int nr_dirs = 0;
67660 int err = -ENOMEM;
67661@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67662 nr_files++;
67663 }
67664
67665- files = table;
67666 /* If there are mixed files and directories we need a new table */
67667 if (nr_dirs && nr_files) {
67668- struct ctl_table *new;
67669+ ctl_table_no_const *new;
67670 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
67671 GFP_KERNEL);
67672 if (!files)
67673@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67674 /* Register everything except a directory full of subdirectories */
67675 if (nr_files || !nr_dirs) {
67676 struct ctl_table_header *header;
67677- header = __register_sysctl_table(set, path, files);
67678+ header = __register_sysctl_table(set, path, files ? files : table);
67679 if (!header) {
67680 kfree(ctl_table_arg);
67681 goto out;
67682diff --git a/fs/proc/root.c b/fs/proc/root.c
67683index 094e44d..085a877 100644
67684--- a/fs/proc/root.c
67685+++ b/fs/proc/root.c
67686@@ -188,7 +188,15 @@ void __init proc_root_init(void)
67687 proc_mkdir("openprom", NULL);
67688 #endif
67689 proc_tty_init();
67690+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67691+#ifdef CONFIG_GRKERNSEC_PROC_USER
67692+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
67693+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67694+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
67695+#endif
67696+#else
67697 proc_mkdir("bus", NULL);
67698+#endif
67699 proc_sys_init();
67700 }
67701
67702diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67703index bf2d03f..f058f9c 100644
67704--- a/fs/proc/stat.c
67705+++ b/fs/proc/stat.c
67706@@ -11,6 +11,7 @@
67707 #include <linux/irqnr.h>
67708 #include <linux/cputime.h>
67709 #include <linux/tick.h>
67710+#include <linux/grsecurity.h>
67711
67712 #ifndef arch_irq_stat_cpu
67713 #define arch_irq_stat_cpu(cpu) 0
67714@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67715 u64 sum_softirq = 0;
67716 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67717 struct timespec boottime;
67718+ int unrestricted = 1;
67719+
67720+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67721+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67722+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67723+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67724+ && !in_group_p(grsec_proc_gid)
67725+#endif
67726+ )
67727+ unrestricted = 0;
67728+#endif
67729+#endif
67730
67731 user = nice = system = idle = iowait =
67732 irq = softirq = steal = 0;
67733@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67734 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67735 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67736 idle += get_idle_time(i);
67737- iowait += get_iowait_time(i);
67738- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67739- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67740- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67741- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67742- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67743- sum += kstat_cpu_irqs_sum(i);
67744- sum += arch_irq_stat_cpu(i);
67745+ if (unrestricted) {
67746+ iowait += get_iowait_time(i);
67747+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67748+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67749+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67750+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67751+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67752+ sum += kstat_cpu_irqs_sum(i);
67753+ sum += arch_irq_stat_cpu(i);
67754+ for (j = 0; j < NR_SOFTIRQS; j++) {
67755+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67756
67757- for (j = 0; j < NR_SOFTIRQS; j++) {
67758- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67759-
67760- per_softirq_sums[j] += softirq_stat;
67761- sum_softirq += softirq_stat;
67762+ per_softirq_sums[j] += softirq_stat;
67763+ sum_softirq += softirq_stat;
67764+ }
67765 }
67766 }
67767- sum += arch_irq_stat();
67768+ if (unrestricted)
67769+ sum += arch_irq_stat();
67770
67771 seq_puts(p, "cpu ");
67772 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67773@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67774 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67775 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67776 idle = get_idle_time(i);
67777- iowait = get_iowait_time(i);
67778- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67779- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67780- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67781- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67782- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67783+ if (unrestricted) {
67784+ iowait = get_iowait_time(i);
67785+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67786+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67787+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67788+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67789+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67790+ }
67791 seq_printf(p, "cpu%d", i);
67792 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67793 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67794@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67795
67796 /* sum again ? it could be updated? */
67797 for_each_irq_nr(j)
67798- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
67799+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
67800
67801 seq_printf(p,
67802 "\nctxt %llu\n"
67803@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67804 "processes %lu\n"
67805 "procs_running %lu\n"
67806 "procs_blocked %lu\n",
67807- nr_context_switches(),
67808+ unrestricted ? nr_context_switches() : 0ULL,
67809 (unsigned long)jif,
67810- total_forks,
67811- nr_running(),
67812- nr_iowait());
67813+ unrestricted ? total_forks : 0UL,
67814+ unrestricted ? nr_running() : 0UL,
67815+ unrestricted ? nr_iowait() : 0UL);
67816
67817 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67818
67819diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67820index c341568..75852a2 100644
67821--- a/fs/proc/task_mmu.c
67822+++ b/fs/proc/task_mmu.c
67823@@ -13,12 +13,19 @@
67824 #include <linux/swap.h>
67825 #include <linux/swapops.h>
67826 #include <linux/mmu_notifier.h>
67827+#include <linux/grsecurity.h>
67828
67829 #include <asm/elf.h>
67830 #include <asm/uaccess.h>
67831 #include <asm/tlbflush.h>
67832 #include "internal.h"
67833
67834+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67835+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67836+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67837+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67838+#endif
67839+
67840 void task_mem(struct seq_file *m, struct mm_struct *mm)
67841 {
67842 unsigned long data, text, lib, swap;
67843@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67844 "VmExe:\t%8lu kB\n"
67845 "VmLib:\t%8lu kB\n"
67846 "VmPTE:\t%8lu kB\n"
67847- "VmSwap:\t%8lu kB\n",
67848- hiwater_vm << (PAGE_SHIFT-10),
67849+ "VmSwap:\t%8lu kB\n"
67850+
67851+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67852+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67853+#endif
67854+
67855+ ,hiwater_vm << (PAGE_SHIFT-10),
67856 total_vm << (PAGE_SHIFT-10),
67857 mm->locked_vm << (PAGE_SHIFT-10),
67858 mm->pinned_vm << (PAGE_SHIFT-10),
67859@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67860 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67861 (PTRS_PER_PTE * sizeof(pte_t) *
67862 atomic_long_read(&mm->nr_ptes)) >> 10,
67863- swap << (PAGE_SHIFT-10));
67864+ swap << (PAGE_SHIFT-10)
67865+
67866+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67867+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67868+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67869+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67870+#else
67871+ , mm->context.user_cs_base
67872+ , mm->context.user_cs_limit
67873+#endif
67874+#endif
67875+
67876+ );
67877 }
67878
67879 unsigned long task_vsize(struct mm_struct *mm)
67880@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67881 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67882 }
67883
67884- /* We don't show the stack guard page in /proc/maps */
67885+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67886+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67887+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67888+#else
67889 start = vma->vm_start;
67890- if (stack_guard_page_start(vma, start))
67891- start += PAGE_SIZE;
67892 end = vma->vm_end;
67893- if (stack_guard_page_end(vma, end))
67894- end -= PAGE_SIZE;
67895+#endif
67896
67897 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67898 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67899@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67900 flags & VM_WRITE ? 'w' : '-',
67901 flags & VM_EXEC ? 'x' : '-',
67902 flags & VM_MAYSHARE ? 's' : 'p',
67903+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67904+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67905+#else
67906 pgoff,
67907+#endif
67908 MAJOR(dev), MINOR(dev), ino);
67909
67910 /*
67911@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67912 */
67913 if (file) {
67914 seq_pad(m, ' ');
67915- seq_path(m, &file->f_path, "\n");
67916+ seq_path(m, &file->f_path, "\n\\");
67917 goto done;
67918 }
67919
67920@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67921 * Thread stack in /proc/PID/task/TID/maps or
67922 * the main process stack.
67923 */
67924- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67925- vma->vm_end >= mm->start_stack)) {
67926+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67927+ (vma->vm_start <= mm->start_stack &&
67928+ vma->vm_end >= mm->start_stack)) {
67929 name = "[stack]";
67930 } else {
67931 /* Thread stack in /proc/PID/maps */
67932@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
67933 struct proc_maps_private *priv = m->private;
67934 struct task_struct *task = priv->task;
67935
67936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67937+ if (current->exec_id != m->exec_id) {
67938+ gr_log_badprocpid("maps");
67939+ return 0;
67940+ }
67941+#endif
67942+
67943 show_map_vma(m, vma, is_pid);
67944
67945 if (m->count < m->size) /* vma is copied successfully */
67946@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67947 .private = &mss,
67948 };
67949
67950+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67951+ if (current->exec_id != m->exec_id) {
67952+ gr_log_badprocpid("smaps");
67953+ return 0;
67954+ }
67955+#endif
67956 memset(&mss, 0, sizeof mss);
67957- mss.vma = vma;
67958- /* mmap_sem is held in m_start */
67959- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67960- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67961-
67962+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67963+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67964+#endif
67965+ mss.vma = vma;
67966+ /* mmap_sem is held in m_start */
67967+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67968+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67969+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67970+ }
67971+#endif
67972 show_map_vma(m, vma, is_pid);
67973
67974 seq_printf(m,
67975@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67976 "KernelPageSize: %8lu kB\n"
67977 "MMUPageSize: %8lu kB\n"
67978 "Locked: %8lu kB\n",
67979+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67980+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67981+#else
67982 (vma->vm_end - vma->vm_start) >> 10,
67983+#endif
67984 mss.resident >> 10,
67985 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67986 mss.shared_clean >> 10,
67987@@ -1422,6 +1473,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67988 char buffer[64];
67989 int nid;
67990
67991+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67992+ if (current->exec_id != m->exec_id) {
67993+ gr_log_badprocpid("numa_maps");
67994+ return 0;
67995+ }
67996+#endif
67997+
67998 if (!mm)
67999 return 0;
68000
68001@@ -1439,11 +1497,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68002 mpol_to_str(buffer, sizeof(buffer), pol);
68003 mpol_cond_put(pol);
68004
68005+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68006+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68007+#else
68008 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68009+#endif
68010
68011 if (file) {
68012 seq_puts(m, " file=");
68013- seq_path(m, &file->f_path, "\n\t= ");
68014+ seq_path(m, &file->f_path, "\n\t\\= ");
68015 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68016 seq_puts(m, " heap");
68017 } else {
68018diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68019index 678455d..ebd3245 100644
68020--- a/fs/proc/task_nommu.c
68021+++ b/fs/proc/task_nommu.c
68022@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68023 else
68024 bytes += kobjsize(mm);
68025
68026- if (current->fs && current->fs->users > 1)
68027+ if (current->fs && atomic_read(&current->fs->users) > 1)
68028 sbytes += kobjsize(current->fs);
68029 else
68030 bytes += kobjsize(current->fs);
68031@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68032
68033 if (file) {
68034 seq_pad(m, ' ');
68035- seq_path(m, &file->f_path, "");
68036+ seq_path(m, &file->f_path, "\n\\");
68037 } else if (mm) {
68038 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68039
68040diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68041index a90d6d35..d08047c 100644
68042--- a/fs/proc/vmcore.c
68043+++ b/fs/proc/vmcore.c
68044@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68045 nr_bytes = count;
68046
68047 /* If pfn is not ram, return zeros for sparse dump files */
68048- if (pfn_is_ram(pfn) == 0)
68049- memset(buf, 0, nr_bytes);
68050- else {
68051+ if (pfn_is_ram(pfn) == 0) {
68052+ if (userbuf) {
68053+ if (clear_user((char __force_user *)buf, nr_bytes))
68054+ return -EFAULT;
68055+ } else
68056+ memset(buf, 0, nr_bytes);
68057+ } else {
68058 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68059 offset, userbuf);
68060 if (tmp < 0)
68061@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68062 static int copy_to(void *target, void *src, size_t size, int userbuf)
68063 {
68064 if (userbuf) {
68065- if (copy_to_user((char __user *) target, src, size))
68066+ if (copy_to_user((char __force_user *) target, src, size))
68067 return -EFAULT;
68068 } else {
68069 memcpy(target, src, size);
68070@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68071 if (*fpos < m->offset + m->size) {
68072 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68073 start = m->paddr + *fpos - m->offset;
68074- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68075+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68076 if (tmp < 0)
68077 return tmp;
68078 buflen -= tsz;
68079@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68080 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68081 size_t buflen, loff_t *fpos)
68082 {
68083- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68084+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68085 }
68086
68087 /*
68088diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68089index d3fb2b6..43a8140 100644
68090--- a/fs/qnx6/qnx6.h
68091+++ b/fs/qnx6/qnx6.h
68092@@ -74,7 +74,7 @@ enum {
68093 BYTESEX_BE,
68094 };
68095
68096-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68097+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68098 {
68099 if (sbi->s_bytesex == BYTESEX_LE)
68100 return le64_to_cpu((__force __le64)n);
68101@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68102 return (__force __fs64)cpu_to_be64(n);
68103 }
68104
68105-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68106+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68107 {
68108 if (sbi->s_bytesex == BYTESEX_LE)
68109 return le32_to_cpu((__force __le32)n);
68110diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68111index bb2869f..d34ada8 100644
68112--- a/fs/quota/netlink.c
68113+++ b/fs/quota/netlink.c
68114@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
68115 void quota_send_warning(struct kqid qid, dev_t dev,
68116 const char warntype)
68117 {
68118- static atomic_t seq;
68119+ static atomic_unchecked_t seq;
68120 struct sk_buff *skb;
68121 void *msg_head;
68122 int ret;
68123@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68124 "VFS: Not enough memory to send quota warning.\n");
68125 return;
68126 }
68127- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68128+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68129 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68130 if (!msg_head) {
68131 printk(KERN_ERR
68132diff --git a/fs/read_write.c b/fs/read_write.c
68133index 009d854..16ce214 100644
68134--- a/fs/read_write.c
68135+++ b/fs/read_write.c
68136@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68137
68138 old_fs = get_fs();
68139 set_fs(get_ds());
68140- p = (__force const char __user *)buf;
68141+ p = (const char __force_user *)buf;
68142 if (count > MAX_RW_COUNT)
68143 count = MAX_RW_COUNT;
68144 if (file->f_op->write)
68145diff --git a/fs/readdir.c b/fs/readdir.c
68146index 33fd922..e0d6094 100644
68147--- a/fs/readdir.c
68148+++ b/fs/readdir.c
68149@@ -18,6 +18,7 @@
68150 #include <linux/security.h>
68151 #include <linux/syscalls.h>
68152 #include <linux/unistd.h>
68153+#include <linux/namei.h>
68154
68155 #include <asm/uaccess.h>
68156
68157@@ -71,6 +72,7 @@ struct old_linux_dirent {
68158 struct readdir_callback {
68159 struct dir_context ctx;
68160 struct old_linux_dirent __user * dirent;
68161+ struct file * file;
68162 int result;
68163 };
68164
68165@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68166 buf->result = -EOVERFLOW;
68167 return -EOVERFLOW;
68168 }
68169+
68170+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68171+ return 0;
68172+
68173 buf->result++;
68174 dirent = buf->dirent;
68175 if (!access_ok(VERIFY_WRITE, dirent,
68176@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68177 if (!f.file)
68178 return -EBADF;
68179
68180+ buf.file = f.file;
68181 error = iterate_dir(f.file, &buf.ctx);
68182 if (buf.result)
68183 error = buf.result;
68184@@ -144,6 +151,7 @@ struct getdents_callback {
68185 struct dir_context ctx;
68186 struct linux_dirent __user * current_dir;
68187 struct linux_dirent __user * previous;
68188+ struct file * file;
68189 int count;
68190 int error;
68191 };
68192@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68193 buf->error = -EOVERFLOW;
68194 return -EOVERFLOW;
68195 }
68196+
68197+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68198+ return 0;
68199+
68200 dirent = buf->previous;
68201 if (dirent) {
68202 if (__put_user(offset, &dirent->d_off))
68203@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68204 if (!f.file)
68205 return -EBADF;
68206
68207+ buf.file = f.file;
68208 error = iterate_dir(f.file, &buf.ctx);
68209 if (error >= 0)
68210 error = buf.error;
68211@@ -228,6 +241,7 @@ struct getdents_callback64 {
68212 struct dir_context ctx;
68213 struct linux_dirent64 __user * current_dir;
68214 struct linux_dirent64 __user * previous;
68215+ struct file *file;
68216 int count;
68217 int error;
68218 };
68219@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68220 buf->error = -EINVAL; /* only used if we fail.. */
68221 if (reclen > buf->count)
68222 return -EINVAL;
68223+
68224+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68225+ return 0;
68226+
68227 dirent = buf->previous;
68228 if (dirent) {
68229 if (__put_user(offset, &dirent->d_off))
68230@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68231 if (!f.file)
68232 return -EBADF;
68233
68234+ buf.file = f.file;
68235 error = iterate_dir(f.file, &buf.ctx);
68236 if (error >= 0)
68237 error = buf.error;
68238diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68239index 9c02d96..6562c10 100644
68240--- a/fs/reiserfs/do_balan.c
68241+++ b/fs/reiserfs/do_balan.c
68242@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68243 return;
68244 }
68245
68246- atomic_inc(&fs_generation(tb->tb_sb));
68247+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68248 do_balance_starts(tb);
68249
68250 /*
68251diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68252index aca73dd..e3c558d 100644
68253--- a/fs/reiserfs/item_ops.c
68254+++ b/fs/reiserfs/item_ops.c
68255@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68256 }
68257
68258 static struct item_operations errcatch_ops = {
68259- errcatch_bytes_number,
68260- errcatch_decrement_key,
68261- errcatch_is_left_mergeable,
68262- errcatch_print_item,
68263- errcatch_check_item,
68264+ .bytes_number = errcatch_bytes_number,
68265+ .decrement_key = errcatch_decrement_key,
68266+ .is_left_mergeable = errcatch_is_left_mergeable,
68267+ .print_item = errcatch_print_item,
68268+ .check_item = errcatch_check_item,
68269
68270- errcatch_create_vi,
68271- errcatch_check_left,
68272- errcatch_check_right,
68273- errcatch_part_size,
68274- errcatch_unit_num,
68275- errcatch_print_vi
68276+ .create_vi = errcatch_create_vi,
68277+ .check_left = errcatch_check_left,
68278+ .check_right = errcatch_check_right,
68279+ .part_size = errcatch_part_size,
68280+ .unit_num = errcatch_unit_num,
68281+ .print_vi = errcatch_print_vi
68282 };
68283
68284 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
68285diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68286index 621b9f3..af527fd 100644
68287--- a/fs/reiserfs/procfs.c
68288+++ b/fs/reiserfs/procfs.c
68289@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68290 "SMALL_TAILS " : "NO_TAILS ",
68291 replay_only(sb) ? "REPLAY_ONLY " : "",
68292 convert_reiserfs(sb) ? "CONV " : "",
68293- atomic_read(&r->s_generation_counter),
68294+ atomic_read_unchecked(&r->s_generation_counter),
68295 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68296 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68297 SF(s_good_search_by_key_reada), SF(s_bmaps),
68298diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68299index 735c2c2..81b91af 100644
68300--- a/fs/reiserfs/reiserfs.h
68301+++ b/fs/reiserfs/reiserfs.h
68302@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
68303 /* Comment? -Hans */
68304 wait_queue_head_t s_wait;
68305 /* increased by one every time the tree gets re-balanced */
68306- atomic_t s_generation_counter;
68307+ atomic_unchecked_t s_generation_counter;
68308
68309 /* File system properties. Currently holds on-disk FS format */
68310 unsigned long s_properties;
68311@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68312 #define REISERFS_USER_MEM 1 /* user memory mode */
68313
68314 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68315-#define get_generation(s) atomic_read (&fs_generation(s))
68316+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68317 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68318 #define __fs_changed(gen,s) (gen != get_generation (s))
68319 #define fs_changed(gen,s) \
68320diff --git a/fs/select.c b/fs/select.c
68321index 467bb1c..cf9d65a 100644
68322--- a/fs/select.c
68323+++ b/fs/select.c
68324@@ -20,6 +20,7 @@
68325 #include <linux/export.h>
68326 #include <linux/slab.h>
68327 #include <linux/poll.h>
68328+#include <linux/security.h>
68329 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68330 #include <linux/file.h>
68331 #include <linux/fdtable.h>
68332@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68333 struct poll_list *walk = head;
68334 unsigned long todo = nfds;
68335
68336+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68337 if (nfds > rlimit(RLIMIT_NOFILE))
68338 return -EINVAL;
68339
68340diff --git a/fs/seq_file.c b/fs/seq_file.c
68341index 3857b72..0b7281e 100644
68342--- a/fs/seq_file.c
68343+++ b/fs/seq_file.c
68344@@ -12,6 +12,8 @@
68345 #include <linux/slab.h>
68346 #include <linux/cred.h>
68347 #include <linux/mm.h>
68348+#include <linux/sched.h>
68349+#include <linux/grsecurity.h>
68350
68351 #include <asm/uaccess.h>
68352 #include <asm/page.h>
68353@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
68354
68355 static void *seq_buf_alloc(unsigned long size)
68356 {
68357- void *buf;
68358-
68359- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
68360- if (!buf && size > PAGE_SIZE)
68361- buf = vmalloc(size);
68362- return buf;
68363+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68364 }
68365
68366 /**
68367@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68368 #ifdef CONFIG_USER_NS
68369 p->user_ns = file->f_cred->user_ns;
68370 #endif
68371+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68372+ p->exec_id = current->exec_id;
68373+#endif
68374
68375 /*
68376 * Wrappers around seq_open(e.g. swaps_open) need to be
68377@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68378 }
68379 EXPORT_SYMBOL(seq_open);
68380
68381+
68382+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68383+{
68384+ if (gr_proc_is_restricted())
68385+ return -EACCES;
68386+
68387+ return seq_open(file, op);
68388+}
68389+EXPORT_SYMBOL(seq_open_restrict);
68390+
68391 static int traverse(struct seq_file *m, loff_t offset)
68392 {
68393 loff_t pos = 0, index;
68394@@ -165,7 +175,7 @@ Eoverflow:
68395 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68396 {
68397 struct seq_file *m = file->private_data;
68398- size_t copied = 0;
68399+ ssize_t copied = 0;
68400 loff_t pos;
68401 size_t n;
68402 void *p;
68403@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
68404 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68405 void *data)
68406 {
68407- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68408+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68409 int res = -ENOMEM;
68410
68411 if (op) {
68412@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68413 }
68414 EXPORT_SYMBOL(single_open_size);
68415
68416+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68417+ void *data)
68418+{
68419+ if (gr_proc_is_restricted())
68420+ return -EACCES;
68421+
68422+ return single_open(file, show, data);
68423+}
68424+EXPORT_SYMBOL(single_open_restrict);
68425+
68426+
68427 int single_release(struct inode *inode, struct file *file)
68428 {
68429 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
68430diff --git a/fs/splice.c b/fs/splice.c
68431index f5cb9ba..8ddb1e9 100644
68432--- a/fs/splice.c
68433+++ b/fs/splice.c
68434@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68435 pipe_lock(pipe);
68436
68437 for (;;) {
68438- if (!pipe->readers) {
68439+ if (!atomic_read(&pipe->readers)) {
68440 send_sig(SIGPIPE, current, 0);
68441 if (!ret)
68442 ret = -EPIPE;
68443@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68444 page_nr++;
68445 ret += buf->len;
68446
68447- if (pipe->files)
68448+ if (atomic_read(&pipe->files))
68449 do_wakeup = 1;
68450
68451 if (!--spd->nr_pages)
68452@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68453 do_wakeup = 0;
68454 }
68455
68456- pipe->waiting_writers++;
68457+ atomic_inc(&pipe->waiting_writers);
68458 pipe_wait(pipe);
68459- pipe->waiting_writers--;
68460+ atomic_dec(&pipe->waiting_writers);
68461 }
68462
68463 pipe_unlock(pipe);
68464@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68465 old_fs = get_fs();
68466 set_fs(get_ds());
68467 /* The cast to a user pointer is valid due to the set_fs() */
68468- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68469+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68470 set_fs(old_fs);
68471
68472 return res;
68473@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68474 old_fs = get_fs();
68475 set_fs(get_ds());
68476 /* The cast to a user pointer is valid due to the set_fs() */
68477- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68478+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68479 set_fs(old_fs);
68480
68481 return res;
68482@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68483 goto err;
68484
68485 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68486- vec[i].iov_base = (void __user *) page_address(page);
68487+ vec[i].iov_base = (void __force_user *) page_address(page);
68488 vec[i].iov_len = this_len;
68489 spd.pages[i] = page;
68490 spd.nr_pages++;
68491@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68492 ops->release(pipe, buf);
68493 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68494 pipe->nrbufs--;
68495- if (pipe->files)
68496+ if (atomic_read(&pipe->files))
68497 sd->need_wakeup = true;
68498 }
68499
68500@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68501 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68502 {
68503 while (!pipe->nrbufs) {
68504- if (!pipe->writers)
68505+ if (!atomic_read(&pipe->writers))
68506 return 0;
68507
68508- if (!pipe->waiting_writers && sd->num_spliced)
68509+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68510 return 0;
68511
68512 if (sd->flags & SPLICE_F_NONBLOCK)
68513@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68514 ops->release(pipe, buf);
68515 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68516 pipe->nrbufs--;
68517- if (pipe->files)
68518+ if (atomic_read(&pipe->files))
68519 sd.need_wakeup = true;
68520 } else {
68521 buf->offset += ret;
68522@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68523 * out of the pipe right after the splice_to_pipe(). So set
68524 * PIPE_READERS appropriately.
68525 */
68526- pipe->readers = 1;
68527+ atomic_set(&pipe->readers, 1);
68528
68529 current->splice_pipe = pipe;
68530 }
68531@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68532
68533 partial[buffers].offset = off;
68534 partial[buffers].len = plen;
68535+ partial[buffers].private = 0;
68536
68537 off = 0;
68538 len -= plen;
68539@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68540 ret = -ERESTARTSYS;
68541 break;
68542 }
68543- if (!pipe->writers)
68544+ if (!atomic_read(&pipe->writers))
68545 break;
68546- if (!pipe->waiting_writers) {
68547+ if (!atomic_read(&pipe->waiting_writers)) {
68548 if (flags & SPLICE_F_NONBLOCK) {
68549 ret = -EAGAIN;
68550 break;
68551@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68552 pipe_lock(pipe);
68553
68554 while (pipe->nrbufs >= pipe->buffers) {
68555- if (!pipe->readers) {
68556+ if (!atomic_read(&pipe->readers)) {
68557 send_sig(SIGPIPE, current, 0);
68558 ret = -EPIPE;
68559 break;
68560@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68561 ret = -ERESTARTSYS;
68562 break;
68563 }
68564- pipe->waiting_writers++;
68565+ atomic_inc(&pipe->waiting_writers);
68566 pipe_wait(pipe);
68567- pipe->waiting_writers--;
68568+ atomic_dec(&pipe->waiting_writers);
68569 }
68570
68571 pipe_unlock(pipe);
68572@@ -1817,14 +1818,14 @@ retry:
68573 pipe_double_lock(ipipe, opipe);
68574
68575 do {
68576- if (!opipe->readers) {
68577+ if (!atomic_read(&opipe->readers)) {
68578 send_sig(SIGPIPE, current, 0);
68579 if (!ret)
68580 ret = -EPIPE;
68581 break;
68582 }
68583
68584- if (!ipipe->nrbufs && !ipipe->writers)
68585+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68586 break;
68587
68588 /*
68589@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68590 pipe_double_lock(ipipe, opipe);
68591
68592 do {
68593- if (!opipe->readers) {
68594+ if (!atomic_read(&opipe->readers)) {
68595 send_sig(SIGPIPE, current, 0);
68596 if (!ret)
68597 ret = -EPIPE;
68598@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68599 * return EAGAIN if we have the potential of some data in the
68600 * future, otherwise just return 0
68601 */
68602- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68603+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68604 ret = -EAGAIN;
68605
68606 pipe_unlock(ipipe);
68607diff --git a/fs/stat.c b/fs/stat.c
68608index ae0c3ce..9ee641c 100644
68609--- a/fs/stat.c
68610+++ b/fs/stat.c
68611@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68612 stat->gid = inode->i_gid;
68613 stat->rdev = inode->i_rdev;
68614 stat->size = i_size_read(inode);
68615- stat->atime = inode->i_atime;
68616- stat->mtime = inode->i_mtime;
68617+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68618+ stat->atime = inode->i_ctime;
68619+ stat->mtime = inode->i_ctime;
68620+ } else {
68621+ stat->atime = inode->i_atime;
68622+ stat->mtime = inode->i_mtime;
68623+ }
68624 stat->ctime = inode->i_ctime;
68625 stat->blksize = (1 << inode->i_blkbits);
68626 stat->blocks = inode->i_blocks;
68627@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
68628 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
68629 {
68630 struct inode *inode = path->dentry->d_inode;
68631+ int retval;
68632
68633- if (inode->i_op->getattr)
68634- return inode->i_op->getattr(path->mnt, path->dentry, stat);
68635+ if (inode->i_op->getattr) {
68636+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
68637+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68638+ stat->atime = stat->ctime;
68639+ stat->mtime = stat->ctime;
68640+ }
68641+ return retval;
68642+ }
68643
68644 generic_fillattr(inode, stat);
68645 return 0;
68646diff --git a/fs/super.c b/fs/super.c
68647index b9a214d..6f8c954 100644
68648--- a/fs/super.c
68649+++ b/fs/super.c
68650@@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
68651 inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
68652 dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
68653 total_objects = dentries + inodes + fs_objects + 1;
68654+ if (!total_objects)
68655+ total_objects = 1;
68656
68657 /* proportion the scan between the caches */
68658 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
68659diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
68660index 0b45ff4..847de5b 100644
68661--- a/fs/sysfs/dir.c
68662+++ b/fs/sysfs/dir.c
68663@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68664 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68665 {
68666 struct kernfs_node *parent, *kn;
68667+ const char *name;
68668+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
68669+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68670+ const char *parent_name;
68671+#endif
68672
68673 BUG_ON(!kobj);
68674
68675+ name = kobject_name(kobj);
68676+
68677 if (kobj->parent)
68678 parent = kobj->parent->sd;
68679 else
68680@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68681 if (!parent)
68682 return -ENOENT;
68683
68684- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
68685- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
68686+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68687+ parent_name = parent->name;
68688+ mode = S_IRWXU;
68689+
68690+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
68691+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
68692+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
68693+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
68694+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68695+#endif
68696+
68697+ kn = kernfs_create_dir_ns(parent, name,
68698+ mode, kobj, ns);
68699 if (IS_ERR(kn)) {
68700 if (PTR_ERR(kn) == -EEXIST)
68701- sysfs_warn_dup(parent, kobject_name(kobj));
68702+ sysfs_warn_dup(parent, name);
68703 return PTR_ERR(kn);
68704 }
68705
68706diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
68707index 69d4889..a810bd4 100644
68708--- a/fs/sysv/sysv.h
68709+++ b/fs/sysv/sysv.h
68710@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68711 #endif
68712 }
68713
68714-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68715+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68716 {
68717 if (sbi->s_bytesex == BYTESEX_PDP)
68718 return PDP_swab((__force __u32)n);
68719diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68720index fb08b0c..65fcc7e 100644
68721--- a/fs/ubifs/io.c
68722+++ b/fs/ubifs/io.c
68723@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68724 return err;
68725 }
68726
68727-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68728+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68729 {
68730 int err;
68731
68732diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68733index c175b4d..8f36a16 100644
68734--- a/fs/udf/misc.c
68735+++ b/fs/udf/misc.c
68736@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68737
68738 u8 udf_tag_checksum(const struct tag *t)
68739 {
68740- u8 *data = (u8 *)t;
68741+ const u8 *data = (const u8 *)t;
68742 u8 checksum = 0;
68743 int i;
68744 for (i = 0; i < sizeof(struct tag); ++i)
68745diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68746index 8d974c4..b82f6ec 100644
68747--- a/fs/ufs/swab.h
68748+++ b/fs/ufs/swab.h
68749@@ -22,7 +22,7 @@ enum {
68750 BYTESEX_BE
68751 };
68752
68753-static inline u64
68754+static inline u64 __intentional_overflow(-1)
68755 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68756 {
68757 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68758@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68759 return (__force __fs64)cpu_to_be64(n);
68760 }
68761
68762-static inline u32
68763+static inline u32 __intentional_overflow(-1)
68764 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68765 {
68766 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68767diff --git a/fs/utimes.c b/fs/utimes.c
68768index aa138d6..5f3a811 100644
68769--- a/fs/utimes.c
68770+++ b/fs/utimes.c
68771@@ -1,6 +1,7 @@
68772 #include <linux/compiler.h>
68773 #include <linux/file.h>
68774 #include <linux/fs.h>
68775+#include <linux/security.h>
68776 #include <linux/linkage.h>
68777 #include <linux/mount.h>
68778 #include <linux/namei.h>
68779@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68780 }
68781 }
68782 retry_deleg:
68783+
68784+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68785+ error = -EACCES;
68786+ goto mnt_drop_write_and_out;
68787+ }
68788+
68789 mutex_lock(&inode->i_mutex);
68790 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68791 mutex_unlock(&inode->i_mutex);
68792diff --git a/fs/xattr.c b/fs/xattr.c
68793index c69e6d4..cc56af5 100644
68794--- a/fs/xattr.c
68795+++ b/fs/xattr.c
68796@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68797 return rc;
68798 }
68799
68800+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68801+ssize_t
68802+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68803+{
68804+ struct inode *inode = dentry->d_inode;
68805+ ssize_t error;
68806+
68807+ error = inode_permission(inode, MAY_EXEC);
68808+ if (error)
68809+ return error;
68810+
68811+ if (inode->i_op->getxattr)
68812+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68813+ else
68814+ error = -EOPNOTSUPP;
68815+
68816+ return error;
68817+}
68818+EXPORT_SYMBOL(pax_getxattr);
68819+#endif
68820+
68821 ssize_t
68822 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68823 {
68824@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68825 * Extended attribute SET operations
68826 */
68827 static long
68828-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68829+setxattr(struct path *path, const char __user *name, const void __user *value,
68830 size_t size, int flags)
68831 {
68832 int error;
68833@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68834 posix_acl_fix_xattr_from_user(kvalue, size);
68835 }
68836
68837- error = vfs_setxattr(d, kname, kvalue, size, flags);
68838+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68839+ error = -EACCES;
68840+ goto out;
68841+ }
68842+
68843+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68844 out:
68845 if (vvalue)
68846 vfree(vvalue);
68847@@ -377,7 +403,7 @@ retry:
68848 return error;
68849 error = mnt_want_write(path.mnt);
68850 if (!error) {
68851- error = setxattr(path.dentry, name, value, size, flags);
68852+ error = setxattr(&path, name, value, size, flags);
68853 mnt_drop_write(path.mnt);
68854 }
68855 path_put(&path);
68856@@ -401,7 +427,7 @@ retry:
68857 return error;
68858 error = mnt_want_write(path.mnt);
68859 if (!error) {
68860- error = setxattr(path.dentry, name, value, size, flags);
68861+ error = setxattr(&path, name, value, size, flags);
68862 mnt_drop_write(path.mnt);
68863 }
68864 path_put(&path);
68865@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68866 const void __user *,value, size_t, size, int, flags)
68867 {
68868 struct fd f = fdget(fd);
68869- struct dentry *dentry;
68870 int error = -EBADF;
68871
68872 if (!f.file)
68873 return error;
68874- dentry = f.file->f_path.dentry;
68875- audit_inode(NULL, dentry, 0);
68876+ audit_inode(NULL, f.file->f_path.dentry, 0);
68877 error = mnt_want_write_file(f.file);
68878 if (!error) {
68879- error = setxattr(dentry, name, value, size, flags);
68880+ error = setxattr(&f.file->f_path, name, value, size, flags);
68881 mnt_drop_write_file(f.file);
68882 }
68883 fdput(f);
68884@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68885 * Extended attribute REMOVE operations
68886 */
68887 static long
68888-removexattr(struct dentry *d, const char __user *name)
68889+removexattr(struct path *path, const char __user *name)
68890 {
68891 int error;
68892 char kname[XATTR_NAME_MAX + 1];
68893@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
68894 if (error < 0)
68895 return error;
68896
68897- return vfs_removexattr(d, kname);
68898+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68899+ return -EACCES;
68900+
68901+ return vfs_removexattr(path->dentry, kname);
68902 }
68903
68904 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
68905@@ -652,7 +679,7 @@ retry:
68906 return error;
68907 error = mnt_want_write(path.mnt);
68908 if (!error) {
68909- error = removexattr(path.dentry, name);
68910+ error = removexattr(&path, name);
68911 mnt_drop_write(path.mnt);
68912 }
68913 path_put(&path);
68914@@ -675,7 +702,7 @@ retry:
68915 return error;
68916 error = mnt_want_write(path.mnt);
68917 if (!error) {
68918- error = removexattr(path.dentry, name);
68919+ error = removexattr(&path, name);
68920 mnt_drop_write(path.mnt);
68921 }
68922 path_put(&path);
68923@@ -689,16 +716,16 @@ retry:
68924 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68925 {
68926 struct fd f = fdget(fd);
68927- struct dentry *dentry;
68928+ struct path *path;
68929 int error = -EBADF;
68930
68931 if (!f.file)
68932 return error;
68933- dentry = f.file->f_path.dentry;
68934- audit_inode(NULL, dentry, 0);
68935+ path = &f.file->f_path;
68936+ audit_inode(NULL, path->dentry, 0);
68937 error = mnt_want_write_file(f.file);
68938 if (!error) {
68939- error = removexattr(dentry, name);
68940+ error = removexattr(path, name);
68941 mnt_drop_write_file(f.file);
68942 }
68943 fdput(f);
68944diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68945index 86df952..ac430d6 100644
68946--- a/fs/xfs/libxfs/xfs_bmap.c
68947+++ b/fs/xfs/libxfs/xfs_bmap.c
68948@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
68949
68950 #else
68951 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68952-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68953+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68954 #endif /* DEBUG */
68955
68956 /*
68957diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68958index f1b69ed..3d0222f 100644
68959--- a/fs/xfs/xfs_dir2_readdir.c
68960+++ b/fs/xfs/xfs_dir2_readdir.c
68961@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
68962 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68963 filetype = dp->d_ops->sf_get_ftype(sfep);
68964 ctx->pos = off & 0x7fffffff;
68965- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68966+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68967+ char name[sfep->namelen];
68968+ memcpy(name, sfep->name, sfep->namelen);
68969+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68970+ return 0;
68971+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68972 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68973 return 0;
68974 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68975diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68976index 3799695..0ddc953 100644
68977--- a/fs/xfs/xfs_ioctl.c
68978+++ b/fs/xfs/xfs_ioctl.c
68979@@ -122,7 +122,7 @@ xfs_find_handle(
68980 }
68981
68982 error = -EFAULT;
68983- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68984+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68985 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68986 goto out_put;
68987
68988diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68989new file mode 100644
68990index 0000000..f27264e
68991--- /dev/null
68992+++ b/grsecurity/Kconfig
68993@@ -0,0 +1,1166 @@
68994+#
68995+# grecurity configuration
68996+#
68997+menu "Memory Protections"
68998+depends on GRKERNSEC
68999+
69000+config GRKERNSEC_KMEM
69001+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69002+ default y if GRKERNSEC_CONFIG_AUTO
69003+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69004+ help
69005+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69006+ be written to or read from to modify or leak the contents of the running
69007+ kernel. /dev/port will also not be allowed to be opened, writing to
69008+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69009+ If you have module support disabled, enabling this will close up several
69010+ ways that are currently used to insert malicious code into the running
69011+ kernel.
69012+
69013+ Even with this feature enabled, we still highly recommend that
69014+ you use the RBAC system, as it is still possible for an attacker to
69015+ modify the running kernel through other more obscure methods.
69016+
69017+ It is highly recommended that you say Y here if you meet all the
69018+ conditions above.
69019+
69020+config GRKERNSEC_VM86
69021+ bool "Restrict VM86 mode"
69022+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69023+ depends on X86_32
69024+
69025+ help
69026+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69027+ make use of a special execution mode on 32bit x86 processors called
69028+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69029+ video cards and will still work with this option enabled. The purpose
69030+ of the option is to prevent exploitation of emulation errors in
69031+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69032+ Nearly all users should be able to enable this option.
69033+
69034+config GRKERNSEC_IO
69035+ bool "Disable privileged I/O"
69036+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69037+ depends on X86
69038+ select RTC_CLASS
69039+ select RTC_INTF_DEV
69040+ select RTC_DRV_CMOS
69041+
69042+ help
69043+ If you say Y here, all ioperm and iopl calls will return an error.
69044+ Ioperm and iopl can be used to modify the running kernel.
69045+ Unfortunately, some programs need this access to operate properly,
69046+ the most notable of which are XFree86 and hwclock. hwclock can be
69047+ remedied by having RTC support in the kernel, so real-time
69048+ clock support is enabled if this option is enabled, to ensure
69049+ that hwclock operates correctly. If hwclock still does not work,
69050+ either update udev or symlink /dev/rtc to /dev/rtc0.
69051+
69052+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69053+ you may not be able to boot into a graphical environment with this
69054+ option enabled. In this case, you should use the RBAC system instead.
69055+
69056+config GRKERNSEC_BPF_HARDEN
69057+ bool "Harden BPF interpreter"
69058+ default y if GRKERNSEC_CONFIG_AUTO
69059+ help
69060+ Unlike previous versions of grsecurity that hardened both the BPF
69061+ interpreted code against corruption at rest as well as the JIT code
69062+ against JIT-spray attacks and attacker-controlled immediate values
69063+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
69064+ and will ensure the interpreted code is read-only at rest. This feature
69065+ may be removed at a later time when eBPF stabilizes to entirely revert
69066+ back to the more secure pre-3.16 BPF interpreter/JIT.
69067+
69068+ If you're using KERNEXEC, it's recommended that you enable this option
69069+ to supplement the hardening of the kernel.
69070+
69071+config GRKERNSEC_PERF_HARDEN
69072+ bool "Disable unprivileged PERF_EVENTS usage by default"
69073+ default y if GRKERNSEC_CONFIG_AUTO
69074+ depends on PERF_EVENTS
69075+ help
69076+ If you say Y here, the range of acceptable values for the
69077+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69078+ default to a new value: 3. When the sysctl is set to this value, no
69079+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69080+
69081+ Though PERF_EVENTS can be used legitimately for performance monitoring
69082+ and low-level application profiling, it is forced on regardless of
69083+ configuration, has been at fault for several vulnerabilities, and
69084+ creates new opportunities for side channels and other information leaks.
69085+
69086+ This feature puts PERF_EVENTS into a secure default state and permits
69087+ the administrator to change out of it temporarily if unprivileged
69088+ application profiling is needed.
69089+
69090+config GRKERNSEC_RAND_THREADSTACK
69091+ bool "Insert random gaps between thread stacks"
69092+ default y if GRKERNSEC_CONFIG_AUTO
69093+ depends on PAX_RANDMMAP && !PPC
69094+ help
69095+ If you say Y here, a random-sized gap will be enforced between allocated
69096+ thread stacks. Glibc's NPTL and other threading libraries that
69097+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69098+ The implementation currently provides 8 bits of entropy for the gap.
69099+
69100+ Many distributions do not compile threaded remote services with the
69101+ -fstack-check argument to GCC, causing the variable-sized stack-based
69102+ allocator, alloca(), to not probe the stack on allocation. This
69103+ permits an unbounded alloca() to skip over any guard page and potentially
69104+ modify another thread's stack reliably. An enforced random gap
69105+ reduces the reliability of such an attack and increases the chance
69106+ that such a read/write to another thread's stack instead lands in
69107+ an unmapped area, causing a crash and triggering grsecurity's
69108+ anti-bruteforcing logic.
69109+
69110+config GRKERNSEC_PROC_MEMMAP
69111+ bool "Harden ASLR against information leaks and entropy reduction"
69112+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69113+ depends on PAX_NOEXEC || PAX_ASLR
69114+ help
69115+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69116+ give no information about the addresses of its mappings if
69117+ PaX features that rely on random addresses are enabled on the task.
69118+ In addition to sanitizing this information and disabling other
69119+ dangerous sources of information, this option causes reads of sensitive
69120+ /proc/<pid> entries where the file descriptor was opened in a different
69121+ task than the one performing the read. Such attempts are logged.
69122+ This option also limits argv/env strings for suid/sgid binaries
69123+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69124+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69125+ binaries to prevent alternative mmap layouts from being abused.
69126+
69127+ If you use PaX it is essential that you say Y here as it closes up
69128+ several holes that make full ASLR useless locally.
69129+
69130+
69131+config GRKERNSEC_KSTACKOVERFLOW
69132+ bool "Prevent kernel stack overflows"
69133+ default y if GRKERNSEC_CONFIG_AUTO
69134+ depends on !IA64 && 64BIT
69135+ help
69136+ If you say Y here, the kernel's process stacks will be allocated
69137+ with vmalloc instead of the kernel's default allocator. This
69138+ introduces guard pages that in combination with the alloca checking
69139+ of the STACKLEAK feature prevents all forms of kernel process stack
69140+ overflow abuse. Note that this is different from kernel stack
69141+ buffer overflows.
69142+
69143+config GRKERNSEC_BRUTE
69144+ bool "Deter exploit bruteforcing"
69145+ default y if GRKERNSEC_CONFIG_AUTO
69146+ help
69147+ If you say Y here, attempts to bruteforce exploits against forking
69148+ daemons such as apache or sshd, as well as against suid/sgid binaries
69149+ will be deterred. When a child of a forking daemon is killed by PaX
69150+ or crashes due to an illegal instruction or other suspicious signal,
69151+ the parent process will be delayed 30 seconds upon every subsequent
69152+ fork until the administrator is able to assess the situation and
69153+ restart the daemon.
69154+ In the suid/sgid case, the attempt is logged, the user has all their
69155+ existing instances of the suid/sgid binary terminated and will
69156+ be unable to execute any suid/sgid binaries for 15 minutes.
69157+
69158+ It is recommended that you also enable signal logging in the auditing
69159+ section so that logs are generated when a process triggers a suspicious
69160+ signal.
69161+ If the sysctl option is enabled, a sysctl option with name
69162+ "deter_bruteforce" is created.
69163+
69164+config GRKERNSEC_MODHARDEN
69165+ bool "Harden module auto-loading"
69166+ default y if GRKERNSEC_CONFIG_AUTO
69167+ depends on MODULES
69168+ help
69169+ If you say Y here, module auto-loading in response to use of some
69170+ feature implemented by an unloaded module will be restricted to
69171+ root users. Enabling this option helps defend against attacks
69172+ by unprivileged users who abuse the auto-loading behavior to
69173+ cause a vulnerable module to load that is then exploited.
69174+
69175+ If this option prevents a legitimate use of auto-loading for a
69176+ non-root user, the administrator can execute modprobe manually
69177+ with the exact name of the module mentioned in the alert log.
69178+ Alternatively, the administrator can add the module to the list
69179+ of modules loaded at boot by modifying init scripts.
69180+
69181+ Modification of init scripts will most likely be needed on
69182+ Ubuntu servers with encrypted home directory support enabled,
69183+ as the first non-root user logging in will cause the ecb(aes),
69184+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69185+
69186+config GRKERNSEC_HIDESYM
69187+ bool "Hide kernel symbols"
69188+ default y if GRKERNSEC_CONFIG_AUTO
69189+ select PAX_USERCOPY_SLABS
69190+ help
69191+ If you say Y here, getting information on loaded modules, and
69192+ displaying all kernel symbols through a syscall will be restricted
69193+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69194+ /proc/kallsyms will be restricted to the root user. The RBAC
69195+ system can hide that entry even from root.
69196+
69197+ This option also prevents leaking of kernel addresses through
69198+ several /proc entries.
69199+
69200+ Note that this option is only effective provided the following
69201+ conditions are met:
69202+ 1) The kernel using grsecurity is not precompiled by some distribution
69203+ 2) You have also enabled GRKERNSEC_DMESG
69204+ 3) You are using the RBAC system and hiding other files such as your
69205+ kernel image and System.map. Alternatively, enabling this option
69206+ causes the permissions on /boot, /lib/modules, and the kernel
69207+ source directory to change at compile time to prevent
69208+ reading by non-root users.
69209+ If the above conditions are met, this option will aid in providing a
69210+ useful protection against local kernel exploitation of overflows
69211+ and arbitrary read/write vulnerabilities.
69212+
69213+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69214+ in addition to this feature.
69215+
69216+config GRKERNSEC_RANDSTRUCT
69217+ bool "Randomize layout of sensitive kernel structures"
69218+ default y if GRKERNSEC_CONFIG_AUTO
69219+ select GRKERNSEC_HIDESYM
69220+ select MODVERSIONS if MODULES
69221+ help
69222+ If you say Y here, the layouts of a number of sensitive kernel
69223+ structures (task, fs, cred, etc) and all structures composed entirely
69224+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69225+ This can introduce the requirement of an additional infoleak
69226+ vulnerability for exploits targeting these structure types.
69227+
69228+ Enabling this feature will introduce some performance impact, slightly
69229+ increase memory usage, and prevent the use of forensic tools like
69230+ Volatility against the system (unless the kernel source tree isn't
69231+ cleaned after kernel installation).
69232+
69233+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69234+ It remains after a make clean to allow for external modules to be compiled
69235+ with the existing seed and will be removed by a make mrproper or
69236+ make distclean.
69237+
 69238+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
69239+ to install the supporting headers explicitly in addition to the normal
69240+ gcc package.
69241+
69242+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69243+ bool "Use cacheline-aware structure randomization"
69244+ depends on GRKERNSEC_RANDSTRUCT
69245+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69246+ help
69247+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69248+ at restricting randomization to cacheline-sized groups of elements. It
69249+ will further not randomize bitfields in structures. This reduces the
69250+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69251+
69252+config GRKERNSEC_KERN_LOCKOUT
69253+ bool "Active kernel exploit response"
69254+ default y if GRKERNSEC_CONFIG_AUTO
69255+ depends on X86 || ARM || PPC || SPARC
69256+ help
69257+ If you say Y here, when a PaX alert is triggered due to suspicious
69258+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69259+ or an OOPS occurs due to bad memory accesses, instead of just
69260+ terminating the offending process (and potentially allowing
69261+ a subsequent exploit from the same user), we will take one of two
69262+ actions:
69263+ If the user was root, we will panic the system
69264+ If the user was non-root, we will log the attempt, terminate
69265+ all processes owned by the user, then prevent them from creating
69266+ any new processes until the system is restarted
69267+ This deters repeated kernel exploitation/bruteforcing attempts
69268+ and is useful for later forensics.
69269+
69270+config GRKERNSEC_OLD_ARM_USERLAND
69271+ bool "Old ARM userland compatibility"
69272+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69273+ help
69274+ If you say Y here, stubs of executable code to perform such operations
69275+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69276+ table. This is unfortunately needed for old ARM userland meant to run
69277+ across a wide range of processors. Without this option enabled,
69278+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69279+ which is enough for Linaro userlands or other userlands designed for v6
69280+ and newer ARM CPUs. It's recommended that you try without this option enabled
69281+ first, and only enable it if your userland does not boot (it will likely fail
69282+ at init time).
69283+
69284+endmenu
69285+menu "Role Based Access Control Options"
69286+depends on GRKERNSEC
69287+
69288+config GRKERNSEC_RBAC_DEBUG
69289+ bool
69290+
69291+config GRKERNSEC_NO_RBAC
69292+ bool "Disable RBAC system"
69293+ help
69294+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69295+ preventing the RBAC system from being enabled. You should only say Y
69296+ here if you have no intention of using the RBAC system, so as to prevent
69297+ an attacker with root access from misusing the RBAC system to hide files
69298+ and processes when loadable module support and /dev/[k]mem have been
69299+ locked down.
69300+
69301+config GRKERNSEC_ACL_HIDEKERN
69302+ bool "Hide kernel processes"
69303+ help
69304+ If you say Y here, all kernel threads will be hidden to all
69305+ processes but those whose subject has the "view hidden processes"
69306+ flag.
69307+
69308+config GRKERNSEC_ACL_MAXTRIES
69309+ int "Maximum tries before password lockout"
69310+ default 3
69311+ help
69312+ This option enforces the maximum number of times a user can attempt
69313+ to authorize themselves with the grsecurity RBAC system before being
69314+ denied the ability to attempt authorization again for a specified time.
69315+ The lower the number, the harder it will be to brute-force a password.
69316+
69317+config GRKERNSEC_ACL_TIMEOUT
69318+ int "Time to wait after max password tries, in seconds"
69319+ default 30
69320+ help
69321+ This option specifies the time the user must wait after attempting to
69322+ authorize to the RBAC system with the maximum number of invalid
69323+ passwords. The higher the number, the harder it will be to brute-force
69324+ a password.
69325+
69326+endmenu
69327+menu "Filesystem Protections"
69328+depends on GRKERNSEC
69329+
69330+config GRKERNSEC_PROC
69331+ bool "Proc restrictions"
69332+ default y if GRKERNSEC_CONFIG_AUTO
69333+ help
69334+ If you say Y here, the permissions of the /proc filesystem
69335+ will be altered to enhance system security and privacy. You MUST
69336+ choose either a user only restriction or a user and group restriction.
69337+ Depending upon the option you choose, you can either restrict users to
69338+ see only the processes they themselves run, or choose a group that can
69339+ view all processes and files normally restricted to root if you choose
69340+ the "restrict to user only" option. NOTE: If you're running identd or
69341+ ntpd as a non-root user, you will have to run it as the group you
69342+ specify here.
69343+
69344+config GRKERNSEC_PROC_USER
69345+ bool "Restrict /proc to user only"
69346+ depends on GRKERNSEC_PROC
69347+ help
69348+ If you say Y here, non-root users will only be able to view their own
69349+ processes, and restricts them from viewing network-related information,
69350+ and viewing kernel symbol and module information.
69351+
69352+config GRKERNSEC_PROC_USERGROUP
69353+ bool "Allow special group"
69354+ default y if GRKERNSEC_CONFIG_AUTO
69355+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69356+ help
69357+ If you say Y here, you will be able to select a group that will be
69358+ able to view all processes and network-related information. If you've
69359+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69360+ remain hidden. This option is useful if you want to run identd as
69361+ a non-root user. The group you select may also be chosen at boot time
69362+ via "grsec_proc_gid=" on the kernel commandline.
69363+
69364+config GRKERNSEC_PROC_GID
69365+ int "GID for special group"
69366+ depends on GRKERNSEC_PROC_USERGROUP
69367+ default 1001
69368+
69369+config GRKERNSEC_PROC_ADD
69370+ bool "Additional restrictions"
69371+ default y if GRKERNSEC_CONFIG_AUTO
69372+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69373+ help
69374+ If you say Y here, additional restrictions will be placed on
69375+ /proc that keep normal users from viewing device information and
69376+ slabinfo information that could be useful for exploits.
69377+
69378+config GRKERNSEC_LINK
69379+ bool "Linking restrictions"
69380+ default y if GRKERNSEC_CONFIG_AUTO
69381+ help
69382+ If you say Y here, /tmp race exploits will be prevented, since users
69383+ will no longer be able to follow symlinks owned by other users in
69384+ world-writable +t directories (e.g. /tmp), unless the owner of the
 69385+	  symlink is the owner of the directory. Users will also not be
69386+ able to hardlink to files they do not own. If the sysctl option is
69387+ enabled, a sysctl option with name "linking_restrictions" is created.
69388+
69389+config GRKERNSEC_SYMLINKOWN
69390+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69391+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69392+ help
69393+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69394+ that prevents it from being used as a security feature. As Apache
69395+ verifies the symlink by performing a stat() against the target of
69396+ the symlink before it is followed, an attacker can setup a symlink
69397+ to point to a same-owned file, then replace the symlink with one
69398+ that targets another user's file just after Apache "validates" the
69399+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69400+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69401+ will be in place for the group you specify. If the sysctl option
69402+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69403+ created.
69404+
69405+config GRKERNSEC_SYMLINKOWN_GID
69406+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69407+ depends on GRKERNSEC_SYMLINKOWN
69408+ default 1006
69409+ help
69410+ Setting this GID determines what group kernel-enforced
69411+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69412+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69413+
69414+config GRKERNSEC_FIFO
69415+ bool "FIFO restrictions"
69416+ default y if GRKERNSEC_CONFIG_AUTO
69417+ help
69418+ If you say Y here, users will not be able to write to FIFOs they don't
69419+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69420+ the FIFO is the same owner of the directory it's held in. If the sysctl
69421+ option is enabled, a sysctl option with name "fifo_restrictions" is
69422+ created.
69423+
69424+config GRKERNSEC_SYSFS_RESTRICT
69425+ bool "Sysfs/debugfs restriction"
69426+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69427+ depends on SYSFS
69428+ help
69429+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69430+ any filesystem normally mounted under it (e.g. debugfs) will be
69431+ mostly accessible only by root. These filesystems generally provide access
69432+ to hardware and debug information that isn't appropriate for unprivileged
69433+ users of the system. Sysfs and debugfs have also become a large source
69434+ of new vulnerabilities, ranging from infoleaks to local compromise.
69435+ There has been very little oversight with an eye toward security involved
69436+ in adding new exporters of information to these filesystems, so their
69437+ use is discouraged.
69438+ For reasons of compatibility, a few directories have been whitelisted
69439+ for access by non-root users:
69440+ /sys/fs/selinux
69441+ /sys/fs/fuse
69442+ /sys/devices/system/cpu
69443+
69444+config GRKERNSEC_ROFS
69445+ bool "Runtime read-only mount protection"
69446+ depends on SYSCTL
69447+ help
69448+ If you say Y here, a sysctl option with name "romount_protect" will
69449+ be created. By setting this option to 1 at runtime, filesystems
69450+ will be protected in the following ways:
69451+ * No new writable mounts will be allowed
69452+ * Existing read-only mounts won't be able to be remounted read/write
69453+ * Write operations will be denied on all block devices
69454+ This option acts independently of grsec_lock: once it is set to 1,
69455+ it cannot be turned off. Therefore, please be mindful of the resulting
69456+ behavior if this option is enabled in an init script on a read-only
69457+ filesystem.
69458+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69459+ and GRKERNSEC_IO should be enabled and module loading disabled via
69460+ config or at runtime.
69461+ This feature is mainly intended for secure embedded systems.
69462+
69463+
69464+config GRKERNSEC_DEVICE_SIDECHANNEL
69465+ bool "Eliminate stat/notify-based device sidechannels"
69466+ default y if GRKERNSEC_CONFIG_AUTO
69467+ help
69468+ If you say Y here, timing analyses on block or character
69469+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69470+ will be thwarted for unprivileged users. If a process without
69471+ CAP_MKNOD stats such a device, the last access and last modify times
69472+ will match the device's create time. No access or modify events
69473+ will be triggered through inotify/dnotify/fanotify for such devices.
69474+ This feature will prevent attacks that may at a minimum
69475+ allow an attacker to determine the administrator's password length.
69476+
69477+config GRKERNSEC_CHROOT
69478+ bool "Chroot jail restrictions"
69479+ default y if GRKERNSEC_CONFIG_AUTO
69480+ help
69481+ If you say Y here, you will be able to choose several options that will
69482+ make breaking out of a chrooted jail much more difficult. If you
69483+ encounter no software incompatibilities with the following options, it
69484+ is recommended that you enable each one.
69485+
69486+ Note that the chroot restrictions are not intended to apply to "chroots"
69487+ to directories that are simple bind mounts of the global root filesystem.
69488+ For several other reasons, a user shouldn't expect any significant
69489+ security by performing such a chroot.
69490+
69491+config GRKERNSEC_CHROOT_MOUNT
69492+ bool "Deny mounts"
69493+ default y if GRKERNSEC_CONFIG_AUTO
69494+ depends on GRKERNSEC_CHROOT
69495+ help
69496+ If you say Y here, processes inside a chroot will not be able to
69497+ mount or remount filesystems. If the sysctl option is enabled, a
69498+ sysctl option with name "chroot_deny_mount" is created.
69499+
69500+config GRKERNSEC_CHROOT_DOUBLE
69501+ bool "Deny double-chroots"
69502+ default y if GRKERNSEC_CONFIG_AUTO
69503+ depends on GRKERNSEC_CHROOT
69504+ help
69505+ If you say Y here, processes inside a chroot will not be able to chroot
69506+ again outside the chroot. This is a widely used method of breaking
69507+ out of a chroot jail and should not be allowed. If the sysctl
69508+ option is enabled, a sysctl option with name
69509+ "chroot_deny_chroot" is created.
69510+
69511+config GRKERNSEC_CHROOT_PIVOT
69512+ bool "Deny pivot_root in chroot"
69513+ default y if GRKERNSEC_CONFIG_AUTO
69514+ depends on GRKERNSEC_CHROOT
69515+ help
69516+ If you say Y here, processes inside a chroot will not be able to use
69517+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69518+ works similar to chroot in that it changes the root filesystem. This
69519+ function could be misused in a chrooted process to attempt to break out
69520+ of the chroot, and therefore should not be allowed. If the sysctl
69521+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69522+ created.
69523+
69524+config GRKERNSEC_CHROOT_CHDIR
69525+ bool "Enforce chdir(\"/\") on all chroots"
69526+ default y if GRKERNSEC_CONFIG_AUTO
69527+ depends on GRKERNSEC_CHROOT
69528+ help
69529+ If you say Y here, the current working directory of all newly-chrooted
 69530+	  applications will be set to the root directory of the chroot.
69531+ The man page on chroot(2) states:
69532+ Note that this call does not change the current working
69533+ directory, so that `.' can be outside the tree rooted at
69534+ `/'. In particular, the super-user can escape from a
69535+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69536+
69537+ It is recommended that you say Y here, since it's not known to break
69538+ any software. If the sysctl option is enabled, a sysctl option with
69539+ name "chroot_enforce_chdir" is created.
69540+
69541+config GRKERNSEC_CHROOT_CHMOD
69542+ bool "Deny (f)chmod +s"
69543+ default y if GRKERNSEC_CONFIG_AUTO
69544+ depends on GRKERNSEC_CHROOT
69545+ help
69546+ If you say Y here, processes inside a chroot will not be able to chmod
69547+ or fchmod files to make them have suid or sgid bits. This protects
69548+ against another published method of breaking a chroot. If the sysctl
69549+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69550+ created.
69551+
69552+config GRKERNSEC_CHROOT_FCHDIR
69553+ bool "Deny fchdir and fhandle out of chroot"
69554+ default y if GRKERNSEC_CONFIG_AUTO
69555+ depends on GRKERNSEC_CHROOT
69556+ help
69557+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69558+ to a file descriptor of the chrooting process that points to a directory
69559+ outside the filesystem will be stopped. Additionally, this option prevents
69560+ use of the recently-created syscall for opening files by a guessable "file
69561+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
69562+ with name "chroot_deny_fchdir" is created.
69563+
69564+config GRKERNSEC_CHROOT_MKNOD
69565+ bool "Deny mknod"
69566+ default y if GRKERNSEC_CONFIG_AUTO
69567+ depends on GRKERNSEC_CHROOT
69568+ help
69569+ If you say Y here, processes inside a chroot will not be allowed to
69570+ mknod. The problem with using mknod inside a chroot is that it
69571+ would allow an attacker to create a device entry that is the same
69572+ as one on the physical root of your system, which could range from
69573+ anything from the console device to a device for your harddrive (which
69574+ they could then use to wipe the drive or steal data). It is recommended
69575+ that you say Y here, unless you run into software incompatibilities.
69576+ If the sysctl option is enabled, a sysctl option with name
69577+ "chroot_deny_mknod" is created.
69578+
69579+config GRKERNSEC_CHROOT_SHMAT
69580+ bool "Deny shmat() out of chroot"
69581+ default y if GRKERNSEC_CONFIG_AUTO
69582+ depends on GRKERNSEC_CHROOT
69583+ help
69584+ If you say Y here, processes inside a chroot will not be able to attach
69585+ to shared memory segments that were created outside of the chroot jail.
69586+ It is recommended that you say Y here. If the sysctl option is enabled,
69587+ a sysctl option with name "chroot_deny_shmat" is created.
69588+
69589+config GRKERNSEC_CHROOT_UNIX
69590+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69591+ default y if GRKERNSEC_CONFIG_AUTO
69592+ depends on GRKERNSEC_CHROOT
69593+ help
69594+ If you say Y here, processes inside a chroot will not be able to
69595+ connect to abstract (meaning not belonging to a filesystem) Unix
69596+ domain sockets that were bound outside of a chroot. It is recommended
69597+ that you say Y here. If the sysctl option is enabled, a sysctl option
69598+ with name "chroot_deny_unix" is created.
69599+
69600+config GRKERNSEC_CHROOT_FINDTASK
69601+ bool "Protect outside processes"
69602+ default y if GRKERNSEC_CONFIG_AUTO
69603+ depends on GRKERNSEC_CHROOT
69604+ help
69605+ If you say Y here, processes inside a chroot will not be able to
69606+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69607+ getsid, or view any process outside of the chroot. If the sysctl
69608+ option is enabled, a sysctl option with name "chroot_findtask" is
69609+ created.
69610+
69611+config GRKERNSEC_CHROOT_NICE
69612+ bool "Restrict priority changes"
69613+ default y if GRKERNSEC_CONFIG_AUTO
69614+ depends on GRKERNSEC_CHROOT
69615+ help
69616+ If you say Y here, processes inside a chroot will not be able to raise
69617+ the priority of processes in the chroot, or alter the priority of
69618+ processes outside the chroot. This provides more security than simply
69619+ removing CAP_SYS_NICE from the process' capability set. If the
69620+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69621+ is created.
69622+
69623+config GRKERNSEC_CHROOT_SYSCTL
69624+ bool "Deny sysctl writes"
69625+ default y if GRKERNSEC_CONFIG_AUTO
69626+ depends on GRKERNSEC_CHROOT
69627+ help
69628+ If you say Y here, an attacker in a chroot will not be able to
69629+ write to sysctl entries, either by sysctl(2) or through a /proc
69630+ interface. It is strongly recommended that you say Y here. If the
69631+ sysctl option is enabled, a sysctl option with name
69632+ "chroot_deny_sysctl" is created.
69633+
69634+config GRKERNSEC_CHROOT_CAPS
69635+ bool "Capability restrictions"
69636+ default y if GRKERNSEC_CONFIG_AUTO
69637+ depends on GRKERNSEC_CHROOT
69638+ help
69639+ If you say Y here, the capabilities on all processes within a
69640+ chroot jail will be lowered to stop module insertion, raw i/o,
69641+ system and net admin tasks, rebooting the system, modifying immutable
69642+ files, modifying IPC owned by another, and changing the system time.
69643+ This is left an option because it can break some apps. Disable this
69644+ if your chrooted apps are having problems performing those kinds of
69645+ tasks. If the sysctl option is enabled, a sysctl option with
69646+ name "chroot_caps" is created.
69647+
69648+config GRKERNSEC_CHROOT_INITRD
69649+ bool "Exempt initrd tasks from restrictions"
69650+ default y if GRKERNSEC_CONFIG_AUTO
69651+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
69652+ help
69653+ If you say Y here, tasks started prior to init will be exempted from
69654+ grsecurity's chroot restrictions. This option is mainly meant to
69655+ resolve Plymouth's performing privileged operations unnecessarily
69656+ in a chroot.
69657+
69658+endmenu
69659+menu "Kernel Auditing"
69660+depends on GRKERNSEC
69661+
69662+config GRKERNSEC_AUDIT_GROUP
69663+ bool "Single group for auditing"
69664+ help
69665+ If you say Y here, the exec and chdir logging features will only operate
69666+ on a group you specify. This option is recommended if you only want to
69667+ watch certain users instead of having a large amount of logs from the
69668+ entire system. If the sysctl option is enabled, a sysctl option with
69669+ name "audit_group" is created.
69670+
69671+config GRKERNSEC_AUDIT_GID
69672+ int "GID for auditing"
69673+ depends on GRKERNSEC_AUDIT_GROUP
69674+ default 1007
69675+
69676+config GRKERNSEC_EXECLOG
69677+ bool "Exec logging"
69678+ help
69679+ If you say Y here, all execve() calls will be logged (since the
69680+ other exec*() calls are frontends to execve(), all execution
69681+ will be logged). Useful for shell-servers that like to keep track
69682+ of their users. If the sysctl option is enabled, a sysctl option with
69683+ name "exec_logging" is created.
69684+ WARNING: This option when enabled will produce a LOT of logs, especially
69685+ on an active system.
69686+
69687+config GRKERNSEC_RESLOG
69688+ bool "Resource logging"
69689+ default y if GRKERNSEC_CONFIG_AUTO
69690+ help
69691+ If you say Y here, all attempts to overstep resource limits will
69692+ be logged with the resource name, the requested size, and the current
69693+ limit. It is highly recommended that you say Y here. If the sysctl
69694+ option is enabled, a sysctl option with name "resource_logging" is
69695+ created. If the RBAC system is enabled, the sysctl value is ignored.
69696+
69697+config GRKERNSEC_CHROOT_EXECLOG
69698+ bool "Log execs within chroot"
69699+ help
69700+ If you say Y here, all executions inside a chroot jail will be logged
69701+ to syslog. This can cause a large amount of logs if certain
69702+ applications (eg. djb's daemontools) are installed on the system, and
69703+ is therefore left as an option. If the sysctl option is enabled, a
69704+ sysctl option with name "chroot_execlog" is created.
69705+
69706+config GRKERNSEC_AUDIT_PTRACE
69707+ bool "Ptrace logging"
69708+ help
69709+ If you say Y here, all attempts to attach to a process via ptrace
69710+ will be logged. If the sysctl option is enabled, a sysctl option
69711+ with name "audit_ptrace" is created.
69712+
69713+config GRKERNSEC_AUDIT_CHDIR
69714+ bool "Chdir logging"
69715+ help
69716+ If you say Y here, all chdir() calls will be logged. If the sysctl
69717+ option is enabled, a sysctl option with name "audit_chdir" is created.
69718+
69719+config GRKERNSEC_AUDIT_MOUNT
69720+ bool "(Un)Mount logging"
69721+ help
69722+ If you say Y here, all mounts and unmounts will be logged. If the
69723+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69724+ created.
69725+
69726+config GRKERNSEC_SIGNAL
69727+ bool "Signal logging"
69728+ default y if GRKERNSEC_CONFIG_AUTO
69729+ help
69730+ If you say Y here, certain important signals will be logged, such as
 69731+	  SIGSEGV, which will as a result inform you of when an error in a program
69732+ occurred, which in some cases could mean a possible exploit attempt.
69733+ If the sysctl option is enabled, a sysctl option with name
69734+ "signal_logging" is created.
69735+
69736+config GRKERNSEC_FORKFAIL
69737+ bool "Fork failure logging"
69738+ help
69739+ If you say Y here, all failed fork() attempts will be logged.
69740+ This could suggest a fork bomb, or someone attempting to overstep
69741+ their process limit. If the sysctl option is enabled, a sysctl option
69742+ with name "forkfail_logging" is created.
69743+
69744+config GRKERNSEC_TIME
69745+ bool "Time change logging"
69746+ default y if GRKERNSEC_CONFIG_AUTO
69747+ help
69748+ If you say Y here, any changes of the system clock will be logged.
69749+ If the sysctl option is enabled, a sysctl option with name
69750+ "timechange_logging" is created.
69751+
69752+config GRKERNSEC_PROC_IPADDR
69753+ bool "/proc/<pid>/ipaddr support"
69754+ default y if GRKERNSEC_CONFIG_AUTO
69755+ help
69756+ If you say Y here, a new entry will be added to each /proc/<pid>
69757+ directory that contains the IP address of the person using the task.
69758+ The IP is carried across local TCP and AF_UNIX stream sockets.
69759+ This information can be useful for IDS/IPSes to perform remote response
69760+ to a local attack. The entry is readable by only the owner of the
69761+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69762+ the RBAC system), and thus does not create privacy concerns.
69763+
69764+config GRKERNSEC_RWXMAP_LOG
69765+ bool 'Denied RWX mmap/mprotect logging'
69766+ default y if GRKERNSEC_CONFIG_AUTO
69767+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69768+ help
69769+ If you say Y here, calls to mmap() and mprotect() with explicit
69770+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69771+ denied by the PAX_MPROTECT feature. This feature will also
69772+ log other problematic scenarios that can occur when PAX_MPROTECT
69773+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69774+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69775+ is created.
69776+
69777+endmenu
69778+
69779+menu "Executable Protections"
69780+depends on GRKERNSEC
69781+
69782+config GRKERNSEC_DMESG
69783+ bool "Dmesg(8) restriction"
69784+ default y if GRKERNSEC_CONFIG_AUTO
69785+ help
69786+ If you say Y here, non-root users will not be able to use dmesg(8)
69787+ to view the contents of the kernel's circular log buffer.
69788+ The kernel's log buffer often contains kernel addresses and other
69789+ identifying information useful to an attacker in fingerprinting a
69790+ system for a targeted exploit.
69791+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69792+ created.
69793+
69794+config GRKERNSEC_HARDEN_PTRACE
69795+ bool "Deter ptrace-based process snooping"
69796+ default y if GRKERNSEC_CONFIG_AUTO
69797+ help
69798+ If you say Y here, TTY sniffers and other malicious monitoring
69799+ programs implemented through ptrace will be defeated. If you
69800+ have been using the RBAC system, this option has already been
69801+ enabled for several years for all users, with the ability to make
69802+ fine-grained exceptions.
69803+
69804+ This option only affects the ability of non-root users to ptrace
69805+ processes that are not a descendent of the ptracing process.
69806+ This means that strace ./binary and gdb ./binary will still work,
69807+ but attaching to arbitrary processes will not. If the sysctl
69808+ option is enabled, a sysctl option with name "harden_ptrace" is
69809+ created.
69810+
69811+config GRKERNSEC_PTRACE_READEXEC
69812+ bool "Require read access to ptrace sensitive binaries"
69813+ default y if GRKERNSEC_CONFIG_AUTO
69814+ help
69815+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69816+ binaries. This option is useful in environments that
69817+ remove the read bits (e.g. file mode 4711) from suid binaries to
69818+ prevent infoleaking of their contents. This option adds
69819+ consistency to the use of that file mode, as the binary could normally
69820+ be read out when run without privileges while ptracing.
69821+
69822+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69823+ is created.
69824+
69825+config GRKERNSEC_SETXID
69826+ bool "Enforce consistent multithreaded privileges"
69827+ default y if GRKERNSEC_CONFIG_AUTO
69828+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69829+ help
69830+ If you say Y here, a change from a root uid to a non-root uid
69831+ in a multithreaded application will cause the resulting uids,
69832+ gids, supplementary groups, and capabilities in that thread
69833+ to be propagated to the other threads of the process. In most
69834+ cases this is unnecessary, as glibc will emulate this behavior
69835+ on behalf of the application. Other libcs do not act in the
69836+ same way, allowing the other threads of the process to continue
69837+ running with root privileges. If the sysctl option is enabled,
69838+ a sysctl option with name "consistent_setxid" is created.
69839+
69840+config GRKERNSEC_HARDEN_IPC
69841+ bool "Disallow access to overly-permissive IPC objects"
69842+ default y if GRKERNSEC_CONFIG_AUTO
69843+ depends on SYSVIPC
69844+ help
69845+ If you say Y here, access to overly-permissive IPC objects (shared
69846+ memory, message queues, and semaphores) will be denied for processes
69847+ given the following criteria beyond normal permission checks:
69848+ 1) If the IPC object is world-accessible and the euid doesn't match
69849+ that of the creator or current uid for the IPC object
69850+ 2) If the IPC object is group-accessible and the egid doesn't
69851+ match that of the creator or current gid for the IPC object
69852+ It's a common error to grant too much permission to these objects,
69853+ with impact ranging from denial of service and information leaking to
69854+ privilege escalation. This feature was developed in response to
69855+ research by Tim Brown:
69856+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69857+ who found hundreds of such insecure usages. Processes with
69858+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69859+ If the sysctl option is enabled, a sysctl option with name
69860+ "harden_ipc" is created.
69861+
69862+config GRKERNSEC_TPE
69863+ bool "Trusted Path Execution (TPE)"
69864+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69865+ help
69866+ If you say Y here, you will be able to choose a gid to add to the
69867+ supplementary groups of users you want to mark as "untrusted."
69868+ These users will not be able to execute any files that are not in
69869+ root-owned directories writable only by root. If the sysctl option
69870+ is enabled, a sysctl option with name "tpe" is created.
69871+
69872+config GRKERNSEC_TPE_ALL
69873+ bool "Partially restrict all non-root users"
69874+ depends on GRKERNSEC_TPE
69875+ help
69876+ If you say Y here, all non-root users will be covered under
69877+ a weaker TPE restriction. This is separate from, and in addition to,
69878+ the main TPE options that you have selected elsewhere. Thus, if a
69879+ "trusted" GID is chosen, this restriction applies to even that GID.
69880+ Under this restriction, all non-root users will only be allowed to
69881+ execute files in directories they own that are not group or
69882+ world-writable, or in directories owned by root and writable only by
69883+ root. If the sysctl option is enabled, a sysctl option with name
69884+ "tpe_restrict_all" is created.
69885+
69886+config GRKERNSEC_TPE_INVERT
69887+ bool "Invert GID option"
69888+ depends on GRKERNSEC_TPE
69889+ help
69890+ If you say Y here, the group you specify in the TPE configuration will
69891+ decide what group TPE restrictions will be *disabled* for. This
69892+ option is useful if you want TPE restrictions to be applied to most
69893+ users on the system. If the sysctl option is enabled, a sysctl option
69894+ with name "tpe_invert" is created. Unlike other sysctl options, this
69895+ entry will default to on for backward-compatibility.
69896+
69897+config GRKERNSEC_TPE_GID
69898+ int
69899+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69900+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69901+
69902+config GRKERNSEC_TPE_UNTRUSTED_GID
69903+ int "GID for TPE-untrusted users"
69904+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69905+ default 1005
69906+ help
69907+ Setting this GID determines what group TPE restrictions will be
69908+ *enabled* for. If the sysctl option is enabled, a sysctl option
69909+ with name "tpe_gid" is created.
69910+
69911+config GRKERNSEC_TPE_TRUSTED_GID
69912+ int "GID for TPE-trusted users"
69913+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69914+ default 1005
69915+ help
69916+ Setting this GID determines what group TPE restrictions will be
69917+ *disabled* for. If the sysctl option is enabled, a sysctl option
69918+ with name "tpe_gid" is created.
69919+
69920+endmenu
69921+menu "Network Protections"
69922+depends on GRKERNSEC
69923+
69924+config GRKERNSEC_BLACKHOLE
69925+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69926+ default y if GRKERNSEC_CONFIG_AUTO
69927+ depends on NET
69928+ help
69929+ If you say Y here, neither TCP resets nor ICMP
69930+ destination-unreachable packets will be sent in response to packets
69931+ sent to ports for which no associated listening process exists.
69932+ It will also prevent the sending of ICMP protocol unreachable packets
69933+ in response to packets with unknown protocols.
69934+ This feature supports both IPV4 and IPV6 and exempts the
69935+ loopback interface from blackholing. Enabling this feature
69936+ makes a host more resilient to DoS attacks and reduces network
69937+ visibility against scanners.
69938+
69939+ The blackhole feature as-implemented is equivalent to the FreeBSD
69940+ blackhole feature, as it prevents RST responses to all packets, not
69941+ just SYNs. Under most application behavior this causes no
69942+ problems, but applications (like haproxy) may not close certain
69943+ connections in a way that cleanly terminates them on the remote
69944+ end, leaving the remote host in LAST_ACK state. Because of this
69945+ side-effect and to prevent intentional LAST_ACK DoSes, this
69946+ feature also adds automatic mitigation against such attacks.
69947+ The mitigation drastically reduces the amount of time a socket
69948+ can spend in LAST_ACK state. If you're using haproxy and not
69949+ all servers it connects to have this option enabled, consider
69950+ disabling this feature on the haproxy host.
69951+
69952+ If the sysctl option is enabled, two sysctl options with names
69953+ "ip_blackhole" and "lastack_retries" will be created.
69954+ While "ip_blackhole" takes the standard zero/non-zero on/off
69955+ toggle, "lastack_retries" uses the same kinds of values as
69956+ "tcp_retries1" and "tcp_retries2". The default value of 4
69957+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69958+ state.
69959+
69960+config GRKERNSEC_NO_SIMULT_CONNECT
69961+ bool "Disable TCP Simultaneous Connect"
69962+ default y if GRKERNSEC_CONFIG_AUTO
69963+ depends on NET
69964+ help
69965+ If you say Y here, a feature by Willy Tarreau will be enabled that
69966+ removes a weakness in Linux's strict implementation of TCP that
69967+ allows two clients to connect to each other without either entering
69968+ a listening state. The weakness allows an attacker to easily prevent
69969+ a client from connecting to a known server provided the source port
69970+ for the connection is guessed correctly.
69971+
69972+ As the weakness could be used to prevent an antivirus or IPS from
69973+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69974+ it should be eliminated by enabling this option. Though Linux is
69975+ one of few operating systems supporting simultaneous connect, it
69976+ has no legitimate use in practice and is rarely supported by firewalls.
69977+
69978+config GRKERNSEC_SOCKET
69979+ bool "Socket restrictions"
69980+ depends on NET
69981+ help
69982+ If you say Y here, you will be able to choose from several options.
69983+ If you assign a GID on your system and add it to the supplementary
69984+ groups of users you want to restrict socket access to, this patch
69985+ will perform up to three things, based on the option(s) you choose.
69986+
69987+config GRKERNSEC_SOCKET_ALL
69988+ bool "Deny any sockets to group"
69989+ depends on GRKERNSEC_SOCKET
69990+ help
69991+ If you say Y here, you will be able to choose a GID of whose users will
69992+ be unable to connect to other hosts from your machine or run server
69993+ applications from your machine. If the sysctl option is enabled, a
69994+ sysctl option with name "socket_all" is created.
69995+
69996+config GRKERNSEC_SOCKET_ALL_GID
69997+ int "GID to deny all sockets for"
69998+ depends on GRKERNSEC_SOCKET_ALL
69999+ default 1004
70000+ help
70001+ Here you can choose the GID to disable socket access for. Remember to
70002+ add the users you want socket access disabled for to the GID
70003+ specified here. If the sysctl option is enabled, a sysctl option
70004+ with name "socket_all_gid" is created.
70005+
70006+config GRKERNSEC_SOCKET_CLIENT
70007+ bool "Deny client sockets to group"
70008+ depends on GRKERNSEC_SOCKET
70009+ help
70010+ If you say Y here, you will be able to choose a GID of whose users will
70011+ be unable to connect to other hosts from your machine, but will be
70012+ able to run servers. If this option is enabled, all users in the group
70013+ you specify will have to use passive mode when initiating ftp transfers
70014+ from the shell on your machine. If the sysctl option is enabled, a
70015+ sysctl option with name "socket_client" is created.
70016+
70017+config GRKERNSEC_SOCKET_CLIENT_GID
70018+ int "GID to deny client sockets for"
70019+ depends on GRKERNSEC_SOCKET_CLIENT
70020+ default 1003
70021+ help
70022+ Here you can choose the GID to disable client socket access for.
70023+ Remember to add the users you want client socket access disabled for to
70024+ the GID specified here. If the sysctl option is enabled, a sysctl
70025+ option with name "socket_client_gid" is created.
70026+
70027+config GRKERNSEC_SOCKET_SERVER
70028+ bool "Deny server sockets to group"
70029+ depends on GRKERNSEC_SOCKET
70030+ help
70031+ If you say Y here, you will be able to choose a GID of whose users will
70032+ be unable to run server applications from your machine. If the sysctl
70033+ option is enabled, a sysctl option with name "socket_server" is created.
70034+
70035+config GRKERNSEC_SOCKET_SERVER_GID
70036+ int "GID to deny server sockets for"
70037+ depends on GRKERNSEC_SOCKET_SERVER
70038+ default 1002
70039+ help
70040+ Here you can choose the GID to disable server socket access for.
70041+ Remember to add the users you want server socket access disabled for to
70042+ the GID specified here. If the sysctl option is enabled, a sysctl
70043+ option with name "socket_server_gid" is created.
70044+
70045+endmenu
70046+
70047+menu "Physical Protections"
70048+depends on GRKERNSEC
70049+
70050+config GRKERNSEC_DENYUSB
70051+ bool "Deny new USB connections after toggle"
70052+ default y if GRKERNSEC_CONFIG_AUTO
70053+ depends on SYSCTL && USB_SUPPORT
70054+ help
70055+ If you say Y here, a new sysctl option with name "deny_new_usb"
70056+ will be created. Setting its value to 1 will prevent any new
70057+ USB devices from being recognized by the OS. Any attempted USB
70058+ device insertion will be logged. This option is intended to be
70059+ used against custom USB devices designed to exploit vulnerabilities
70060+ in various USB device drivers.
70061+
70062+ For greatest effectiveness, this sysctl should be set after any
70063+ relevant init scripts. This option is safe to enable in distros
70064+ as each user can choose whether or not to toggle the sysctl.
70065+
70066+config GRKERNSEC_DENYUSB_FORCE
70067+ bool "Reject all USB devices not connected at boot"
70068+ select USB
70069+ depends on GRKERNSEC_DENYUSB
70070+ help
70071+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70072+ that doesn't involve a sysctl entry. This option should only be
70073+ enabled if you're sure you want to deny all new USB connections
70074+ at runtime and don't want to modify init scripts. This should not
70075+ be enabled by distros. It forces the core USB code to be built
70076+ into the kernel image so that all devices connected at boot time
70077+ can be recognized and new USB device connections can be prevented
70078+ prior to init running.
70079+
70080+endmenu
70081+
70082+menu "Sysctl Support"
70083+depends on GRKERNSEC && SYSCTL
70084+
70085+config GRKERNSEC_SYSCTL
70086+ bool "Sysctl support"
70087+ default y if GRKERNSEC_CONFIG_AUTO
70088+ help
70089+ If you say Y here, you will be able to change the options that
70090+ grsecurity runs with at bootup, without having to recompile your
70091+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70092+ to enable (1) or disable (0) various features. All the sysctl entries
70093+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70094+ All features enabled in the kernel configuration are disabled at boot
70095+ if you do not say Y to the "Turn on features by default" option.
70096+ All options should be set at startup, and the grsec_lock entry should
70097+ be set to a non-zero value after all the options are set.
70098+ *THIS IS EXTREMELY IMPORTANT*
70099+
70100+config GRKERNSEC_SYSCTL_DISTRO
70101+ bool "Extra sysctl support for distro makers (READ HELP)"
70102+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70103+ help
70104+ If you say Y here, additional sysctl options will be created
70105+ for features that affect processes running as root. Therefore,
70106+ it is critical when using this option that the grsec_lock entry be
70107+ enabled after boot. Only distros with prebuilt kernel packages
70108+ with this option enabled that can ensure grsec_lock is enabled
70109+ after boot should use this option.
70110+ *Failure to set grsec_lock after boot makes all grsec features
70111+ this option covers useless*
70112+
70113+ Currently this option creates the following sysctl entries:
70114+ "Disable Privileged I/O": "disable_priv_io"
70115+
70116+config GRKERNSEC_SYSCTL_ON
70117+ bool "Turn on features by default"
70118+ default y if GRKERNSEC_CONFIG_AUTO
70119+ depends on GRKERNSEC_SYSCTL
70120+ help
70121+ If you say Y here, instead of having all features enabled in the
70122+ kernel configuration disabled at boot time, the features will be
70123+ enabled at boot time. It is recommended you say Y here unless
70124+ there is some reason you would want all sysctl-tunable features to
70125+ be disabled by default. As mentioned elsewhere, it is important
70126+ to enable the grsec_lock entry once you have finished modifying
70127+ the sysctl entries.
70128+
70129+endmenu
70130+menu "Logging Options"
70131+depends on GRKERNSEC
70132+
70133+config GRKERNSEC_FLOODTIME
70134+ int "Seconds in between log messages (minimum)"
70135+ default 10
70136+ help
70137+ This option allows you to enforce the number of seconds between
70138+ grsecurity log messages. The default should be suitable for most
70139+ people, however, if you choose to change it, choose a value small enough
70140+ to allow informative logs to be produced, but large enough to
70141+ prevent flooding.
70142+
70143+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70144+ any rate limiting on grsecurity log messages.
70145+
70146+config GRKERNSEC_FLOODBURST
70147+ int "Number of messages in a burst (maximum)"
70148+ default 6
70149+ help
70150+ This option allows you to choose the maximum number of messages allowed
70151+ within the flood time interval you chose in a separate option. The
70152+ default should be suitable for most people, however if you find that
70153+ many of your logs are being interpreted as flooding, you may want to
70154+ raise this value.
70155+
70156+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70157+ any rate limiting on grsecurity log messages.
70158+
70159+endmenu
70160diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70161new file mode 100644
70162index 0000000..30ababb
70163--- /dev/null
70164+++ b/grsecurity/Makefile
70165@@ -0,0 +1,54 @@
70166+# grsecurity – access control and security hardening for Linux
70167+# All code in this directory and various hooks located throughout the Linux kernel are
70168+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70169+# http://www.grsecurity.net spender@grsecurity.net
70170+#
70171+# This program is free software; you can redistribute it and/or
70172+# modify it under the terms of the GNU General Public License version 2
70173+# as published by the Free Software Foundation.
70174+#
70175+# This program is distributed in the hope that it will be useful,
70176+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70177+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70178+# GNU General Public License for more details.
70179+#
70180+# You should have received a copy of the GNU General Public License
70181+# along with this program; if not, write to the Free Software
70182+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70183+
70184+KBUILD_CFLAGS += -Werror
70185+
70186+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70187+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70188+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70189+ grsec_usb.o grsec_ipc.o grsec_proc.o
70190+
70191+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70192+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70193+ gracl_learn.o grsec_log.o gracl_policy.o
70194+ifdef CONFIG_COMPAT
70195+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70196+endif
70197+
70198+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70199+
70200+ifdef CONFIG_NET
70201+obj-y += grsec_sock.o
70202+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70203+endif
70204+
70205+ifndef CONFIG_GRKERNSEC
70206+obj-y += grsec_disabled.o
70207+endif
70208+
70209+ifdef CONFIG_GRKERNSEC_HIDESYM
70210+extra-y := grsec_hidesym.o
70211+$(obj)/grsec_hidesym.o:
70212+ @-chmod -f 500 /boot
70213+ @-chmod -f 500 /lib/modules
70214+ @-chmod -f 500 /lib64/modules
70215+ @-chmod -f 500 /lib32/modules
70216+ @-chmod -f 700 .
70217+ @-chmod -f 700 $(objtree)
70218+ @echo ' grsec: protected kernel image paths'
70219+endif
70220diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70221new file mode 100644
70222index 0000000..6ae3aa0
70223--- /dev/null
70224+++ b/grsecurity/gracl.c
70225@@ -0,0 +1,2703 @@
70226+#include <linux/kernel.h>
70227+#include <linux/module.h>
70228+#include <linux/sched.h>
70229+#include <linux/mm.h>
70230+#include <linux/file.h>
70231+#include <linux/fs.h>
70232+#include <linux/namei.h>
70233+#include <linux/mount.h>
70234+#include <linux/tty.h>
70235+#include <linux/proc_fs.h>
70236+#include <linux/lglock.h>
70237+#include <linux/slab.h>
70238+#include <linux/vmalloc.h>
70239+#include <linux/types.h>
70240+#include <linux/sysctl.h>
70241+#include <linux/netdevice.h>
70242+#include <linux/ptrace.h>
70243+#include <linux/gracl.h>
70244+#include <linux/gralloc.h>
70245+#include <linux/security.h>
70246+#include <linux/grinternal.h>
70247+#include <linux/pid_namespace.h>
70248+#include <linux/stop_machine.h>
70249+#include <linux/fdtable.h>
70250+#include <linux/percpu.h>
70251+#include <linux/lglock.h>
70252+#include <linux/hugetlb.h>
70253+#include <linux/posix-timers.h>
70254+#include <linux/prefetch.h>
70255+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70256+#include <linux/magic.h>
70257+#include <linux/pagemap.h>
70258+#include "../fs/btrfs/async-thread.h"
70259+#include "../fs/btrfs/ctree.h"
70260+#include "../fs/btrfs/btrfs_inode.h"
70261+#endif
70262+#include "../fs/mount.h"
70263+
70264+#include <asm/uaccess.h>
70265+#include <asm/errno.h>
70266+#include <asm/mman.h>
70267+
70268+#define FOR_EACH_ROLE_START(role) \
70269+ role = running_polstate.role_list; \
70270+ while (role) {
70271+
70272+#define FOR_EACH_ROLE_END(role) \
70273+ role = role->prev; \
70274+ }
70275+
70276+extern struct path gr_real_root;
70277+
70278+static struct gr_policy_state running_polstate;
70279+struct gr_policy_state *polstate = &running_polstate;
70280+extern struct gr_alloc_state *current_alloc_state;
70281+
70282+extern char *gr_shared_page[4];
70283+DEFINE_RWLOCK(gr_inode_lock);
70284+
70285+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70286+
70287+#ifdef CONFIG_NET
70288+extern struct vfsmount *sock_mnt;
70289+#endif
70290+
70291+extern struct vfsmount *pipe_mnt;
70292+extern struct vfsmount *shm_mnt;
70293+
70294+#ifdef CONFIG_HUGETLBFS
70295+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70296+#endif
70297+
70298+extern u16 acl_sp_role_value;
70299+extern struct acl_object_label *fakefs_obj_rw;
70300+extern struct acl_object_label *fakefs_obj_rwx;
70301+
70302+int gr_acl_is_enabled(void)
70303+{
70304+ return (gr_status & GR_READY);
70305+}
70306+
70307+void gr_enable_rbac_system(void)
70308+{
70309+ pax_open_kernel();
70310+ gr_status |= GR_READY;
70311+ pax_close_kernel();
70312+}
70313+
70314+int gr_rbac_disable(void *unused)
70315+{
70316+ pax_open_kernel();
70317+ gr_status &= ~GR_READY;
70318+ pax_close_kernel();
70319+
70320+ return 0;
70321+}
70322+
70323+static inline dev_t __get_dev(const struct dentry *dentry)
70324+{
70325+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70326+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70327+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70328+ else
70329+#endif
70330+ return dentry->d_sb->s_dev;
70331+}
70332+
70333+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70334+{
70335+ return __get_dev(dentry);
70336+}
70337+
70338+static char gr_task_roletype_to_char(struct task_struct *task)
70339+{
70340+ switch (task->role->roletype &
70341+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70342+ GR_ROLE_SPECIAL)) {
70343+ case GR_ROLE_DEFAULT:
70344+ return 'D';
70345+ case GR_ROLE_USER:
70346+ return 'U';
70347+ case GR_ROLE_GROUP:
70348+ return 'G';
70349+ case GR_ROLE_SPECIAL:
70350+ return 'S';
70351+ }
70352+
70353+ return 'X';
70354+}
70355+
70356+char gr_roletype_to_char(void)
70357+{
70358+ return gr_task_roletype_to_char(current);
70359+}
70360+
70361+__inline__ int
70362+gr_acl_tpe_check(void)
70363+{
70364+ if (unlikely(!(gr_status & GR_READY)))
70365+ return 0;
70366+ if (current->role->roletype & GR_ROLE_TPE)
70367+ return 1;
70368+ else
70369+ return 0;
70370+}
70371+
70372+int
70373+gr_handle_rawio(const struct inode *inode)
70374+{
70375+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70376+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70377+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70378+ !capable(CAP_SYS_RAWIO))
70379+ return 1;
70380+#endif
70381+ return 0;
70382+}
70383+
70384+int
70385+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70386+{
70387+ if (likely(lena != lenb))
70388+ return 0;
70389+
70390+ return !memcmp(a, b, lena);
70391+}
70392+
70393+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70394+{
70395+ *buflen -= namelen;
70396+ if (*buflen < 0)
70397+ return -ENAMETOOLONG;
70398+ *buffer -= namelen;
70399+ memcpy(*buffer, str, namelen);
70400+ return 0;
70401+}
70402+
70403+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70404+{
70405+ return prepend(buffer, buflen, name->name, name->len);
70406+}
70407+
70408+static int prepend_path(const struct path *path, struct path *root,
70409+ char **buffer, int *buflen)
70410+{
70411+ struct dentry *dentry = path->dentry;
70412+ struct vfsmount *vfsmnt = path->mnt;
70413+ struct mount *mnt = real_mount(vfsmnt);
70414+ bool slash = false;
70415+ int error = 0;
70416+
70417+ while (dentry != root->dentry || vfsmnt != root->mnt) {
70418+ struct dentry * parent;
70419+
70420+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
70421+ /* Global root? */
70422+ if (!mnt_has_parent(mnt)) {
70423+ goto out;
70424+ }
70425+ dentry = mnt->mnt_mountpoint;
70426+ mnt = mnt->mnt_parent;
70427+ vfsmnt = &mnt->mnt;
70428+ continue;
70429+ }
70430+ parent = dentry->d_parent;
70431+ prefetch(parent);
70432+ spin_lock(&dentry->d_lock);
70433+ error = prepend_name(buffer, buflen, &dentry->d_name);
70434+ spin_unlock(&dentry->d_lock);
70435+ if (!error)
70436+ error = prepend(buffer, buflen, "/", 1);
70437+ if (error)
70438+ break;
70439+
70440+ slash = true;
70441+ dentry = parent;
70442+ }
70443+
70444+out:
70445+ if (!error && !slash)
70446+ error = prepend(buffer, buflen, "/", 1);
70447+
70448+ return error;
70449+}
70450+
70451+/* this must be called with mount_lock and rename_lock held */
70452+
70453+static char *__our_d_path(const struct path *path, struct path *root,
70454+ char *buf, int buflen)
70455+{
70456+ char *res = buf + buflen;
70457+ int error;
70458+
70459+ prepend(&res, &buflen, "\0", 1);
70460+ error = prepend_path(path, root, &res, &buflen);
70461+ if (error)
70462+ return ERR_PTR(error);
70463+
70464+ return res;
70465+}
70466+
70467+static char *
70468+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70469+{
70470+ char *retval;
70471+
70472+ retval = __our_d_path(path, root, buf, buflen);
70473+ if (unlikely(IS_ERR(retval)))
70474+ retval = strcpy(buf, "<path too long>");
70475+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70476+ retval[1] = '\0';
70477+
70478+ return retval;
70479+}
70480+
70481+static char *
70482+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70483+ char *buf, int buflen)
70484+{
70485+ struct path path;
70486+ char *res;
70487+
70488+ path.dentry = (struct dentry *)dentry;
70489+ path.mnt = (struct vfsmount *)vfsmnt;
70490+
70491+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70492+ by the RBAC system */
70493+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70494+
70495+ return res;
70496+}
70497+
70498+static char *
70499+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70500+ char *buf, int buflen)
70501+{
70502+ char *res;
70503+ struct path path;
70504+ struct path root;
70505+ struct task_struct *reaper = init_pid_ns.child_reaper;
70506+
70507+ path.dentry = (struct dentry *)dentry;
70508+ path.mnt = (struct vfsmount *)vfsmnt;
70509+
70510+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70511+ get_fs_root(reaper->fs, &root);
70512+
70513+ read_seqlock_excl(&mount_lock);
70514+ write_seqlock(&rename_lock);
70515+ res = gen_full_path(&path, &root, buf, buflen);
70516+ write_sequnlock(&rename_lock);
70517+ read_sequnlock_excl(&mount_lock);
70518+
70519+ path_put(&root);
70520+ return res;
70521+}
70522+
70523+char *
70524+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70525+{
70526+ char *ret;
70527+ read_seqlock_excl(&mount_lock);
70528+ write_seqlock(&rename_lock);
70529+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70530+ PAGE_SIZE);
70531+ write_sequnlock(&rename_lock);
70532+ read_sequnlock_excl(&mount_lock);
70533+ return ret;
70534+}
70535+
70536+static char *
70537+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70538+{
70539+ char *ret;
70540+ char *buf;
70541+ int buflen;
70542+
70543+ read_seqlock_excl(&mount_lock);
70544+ write_seqlock(&rename_lock);
70545+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70546+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70547+ buflen = (int)(ret - buf);
70548+ if (buflen >= 5)
70549+ prepend(&ret, &buflen, "/proc", 5);
70550+ else
70551+ ret = strcpy(buf, "<path too long>");
70552+ write_sequnlock(&rename_lock);
70553+ read_sequnlock_excl(&mount_lock);
70554+ return ret;
70555+}
70556+
70557+char *
70558+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70559+{
70560+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70561+ PAGE_SIZE);
70562+}
70563+
70564+char *
70565+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70566+{
70567+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70568+ PAGE_SIZE);
70569+}
70570+
70571+char *
70572+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70573+{
70574+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70575+ PAGE_SIZE);
70576+}
70577+
70578+char *
70579+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70580+{
70581+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70582+ PAGE_SIZE);
70583+}
70584+
70585+char *
70586+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70587+{
70588+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70589+ PAGE_SIZE);
70590+}
70591+
70592+__inline__ __u32
70593+to_gr_audit(const __u32 reqmode)
70594+{
70595+ /* masks off auditable permission flags, then shifts them to create
70596+ auditing flags, and adds the special case of append auditing if
70597+ we're requesting write */
70598+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70599+}
70600+
70601+struct acl_role_label *
70602+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70603+ const gid_t gid)
70604+{
70605+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70606+ struct acl_role_label *match;
70607+ struct role_allowed_ip *ipp;
70608+ unsigned int x;
70609+ u32 curr_ip = task->signal->saved_ip;
70610+
70611+ match = state->acl_role_set.r_hash[index];
70612+
70613+ while (match) {
70614+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70615+ for (x = 0; x < match->domain_child_num; x++) {
70616+ if (match->domain_children[x] == uid)
70617+ goto found;
70618+ }
70619+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70620+ break;
70621+ match = match->next;
70622+ }
70623+found:
70624+ if (match == NULL) {
70625+ try_group:
70626+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70627+ match = state->acl_role_set.r_hash[index];
70628+
70629+ while (match) {
70630+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70631+ for (x = 0; x < match->domain_child_num; x++) {
70632+ if (match->domain_children[x] == gid)
70633+ goto found2;
70634+ }
70635+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70636+ break;
70637+ match = match->next;
70638+ }
70639+found2:
70640+ if (match == NULL)
70641+ match = state->default_role;
70642+ if (match->allowed_ips == NULL)
70643+ return match;
70644+ else {
70645+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70646+ if (likely
70647+ ((ntohl(curr_ip) & ipp->netmask) ==
70648+ (ntohl(ipp->addr) & ipp->netmask)))
70649+ return match;
70650+ }
70651+ match = state->default_role;
70652+ }
70653+ } else if (match->allowed_ips == NULL) {
70654+ return match;
70655+ } else {
70656+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70657+ if (likely
70658+ ((ntohl(curr_ip) & ipp->netmask) ==
70659+ (ntohl(ipp->addr) & ipp->netmask)))
70660+ return match;
70661+ }
70662+ goto try_group;
70663+ }
70664+
70665+ return match;
70666+}
70667+
70668+static struct acl_role_label *
70669+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
70670+ const gid_t gid)
70671+{
70672+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
70673+}
70674+
70675+struct acl_subject_label *
70676+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
70677+ const struct acl_role_label *role)
70678+{
70679+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70680+ struct acl_subject_label *match;
70681+
70682+ match = role->subj_hash[index];
70683+
70684+ while (match && (match->inode != ino || match->device != dev ||
70685+ (match->mode & GR_DELETED))) {
70686+ match = match->next;
70687+ }
70688+
70689+ if (match && !(match->mode & GR_DELETED))
70690+ return match;
70691+ else
70692+ return NULL;
70693+}
70694+
70695+struct acl_subject_label *
70696+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
70697+ const struct acl_role_label *role)
70698+{
70699+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70700+ struct acl_subject_label *match;
70701+
70702+ match = role->subj_hash[index];
70703+
70704+ while (match && (match->inode != ino || match->device != dev ||
70705+ !(match->mode & GR_DELETED))) {
70706+ match = match->next;
70707+ }
70708+
70709+ if (match && (match->mode & GR_DELETED))
70710+ return match;
70711+ else
70712+ return NULL;
70713+}
70714+
70715+static struct acl_object_label *
70716+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
70717+ const struct acl_subject_label *subj)
70718+{
70719+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70720+ struct acl_object_label *match;
70721+
70722+ match = subj->obj_hash[index];
70723+
70724+ while (match && (match->inode != ino || match->device != dev ||
70725+ (match->mode & GR_DELETED))) {
70726+ match = match->next;
70727+ }
70728+
70729+ if (match && !(match->mode & GR_DELETED))
70730+ return match;
70731+ else
70732+ return NULL;
70733+}
70734+
70735+static struct acl_object_label *
70736+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
70737+ const struct acl_subject_label *subj)
70738+{
70739+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70740+ struct acl_object_label *match;
70741+
70742+ match = subj->obj_hash[index];
70743+
70744+ while (match && (match->inode != ino || match->device != dev ||
70745+ !(match->mode & GR_DELETED))) {
70746+ match = match->next;
70747+ }
70748+
70749+ if (match && (match->mode & GR_DELETED))
70750+ return match;
70751+
70752+ match = subj->obj_hash[index];
70753+
70754+ while (match && (match->inode != ino || match->device != dev ||
70755+ (match->mode & GR_DELETED))) {
70756+ match = match->next;
70757+ }
70758+
70759+ if (match && !(match->mode & GR_DELETED))
70760+ return match;
70761+ else
70762+ return NULL;
70763+}
70764+
70765+struct name_entry *
70766+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70767+{
70768+ unsigned int len = strlen(name);
70769+ unsigned int key = full_name_hash(name, len);
70770+ unsigned int index = key % state->name_set.n_size;
70771+ struct name_entry *match;
70772+
70773+ match = state->name_set.n_hash[index];
70774+
70775+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70776+ match = match->next;
70777+
70778+ return match;
70779+}
70780+
70781+static struct name_entry *
70782+lookup_name_entry(const char *name)
70783+{
70784+ return __lookup_name_entry(&running_polstate, name);
70785+}
70786+
70787+static struct name_entry *
70788+lookup_name_entry_create(const char *name)
70789+{
70790+ unsigned int len = strlen(name);
70791+ unsigned int key = full_name_hash(name, len);
70792+ unsigned int index = key % running_polstate.name_set.n_size;
70793+ struct name_entry *match;
70794+
70795+ match = running_polstate.name_set.n_hash[index];
70796+
70797+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70798+ !match->deleted))
70799+ match = match->next;
70800+
70801+ if (match && match->deleted)
70802+ return match;
70803+
70804+ match = running_polstate.name_set.n_hash[index];
70805+
70806+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70807+ match->deleted))
70808+ match = match->next;
70809+
70810+ if (match && !match->deleted)
70811+ return match;
70812+ else
70813+ return NULL;
70814+}
70815+
70816+static struct inodev_entry *
70817+lookup_inodev_entry(const ino_t ino, const dev_t dev)
70818+{
70819+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70820+ struct inodev_entry *match;
70821+
70822+ match = running_polstate.inodev_set.i_hash[index];
70823+
70824+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70825+ match = match->next;
70826+
70827+ return match;
70828+}
70829+
70830+void
70831+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70832+{
70833+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70834+ state->inodev_set.i_size);
70835+ struct inodev_entry **curr;
70836+
70837+ entry->prev = NULL;
70838+
70839+ curr = &state->inodev_set.i_hash[index];
70840+ if (*curr != NULL)
70841+ (*curr)->prev = entry;
70842+
70843+ entry->next = *curr;
70844+ *curr = entry;
70845+
70846+ return;
70847+}
70848+
70849+static void
70850+insert_inodev_entry(struct inodev_entry *entry)
70851+{
70852+ __insert_inodev_entry(&running_polstate, entry);
70853+}
70854+
70855+void
70856+insert_acl_obj_label(struct acl_object_label *obj,
70857+ struct acl_subject_label *subj)
70858+{
70859+ unsigned int index =
70860+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70861+ struct acl_object_label **curr;
70862+
70863+ obj->prev = NULL;
70864+
70865+ curr = &subj->obj_hash[index];
70866+ if (*curr != NULL)
70867+ (*curr)->prev = obj;
70868+
70869+ obj->next = *curr;
70870+ *curr = obj;
70871+
70872+ return;
70873+}
70874+
70875+void
70876+insert_acl_subj_label(struct acl_subject_label *obj,
70877+ struct acl_role_label *role)
70878+{
70879+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70880+ struct acl_subject_label **curr;
70881+
70882+ obj->prev = NULL;
70883+
70884+ curr = &role->subj_hash[index];
70885+ if (*curr != NULL)
70886+ (*curr)->prev = obj;
70887+
70888+ obj->next = *curr;
70889+ *curr = obj;
70890+
70891+ return;
70892+}
70893+
70894+/* derived from glibc fnmatch() 0: match, 1: no match*/
70895+
70896+static int
70897+glob_match(const char *p, const char *n)
70898+{
70899+ char c;
70900+
70901+ while ((c = *p++) != '\0') {
70902+ switch (c) {
70903+ case '?':
70904+ if (*n == '\0')
70905+ return 1;
70906+ else if (*n == '/')
70907+ return 1;
70908+ break;
70909+ case '\\':
70910+ if (*n != c)
70911+ return 1;
70912+ break;
70913+ case '*':
70914+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70915+ if (*n == '/')
70916+ return 1;
70917+ else if (c == '?') {
70918+ if (*n == '\0')
70919+ return 1;
70920+ else
70921+ ++n;
70922+ }
70923+ }
70924+ if (c == '\0') {
70925+ return 0;
70926+ } else {
70927+ const char *endp;
70928+
70929+ if ((endp = strchr(n, '/')) == NULL)
70930+ endp = n + strlen(n);
70931+
70932+ if (c == '[') {
70933+ for (--p; n < endp; ++n)
70934+ if (!glob_match(p, n))
70935+ return 0;
70936+ } else if (c == '/') {
70937+ while (*n != '\0' && *n != '/')
70938+ ++n;
70939+ if (*n == '/' && !glob_match(p, n + 1))
70940+ return 0;
70941+ } else {
70942+ for (--p; n < endp; ++n)
70943+ if (*n == c && !glob_match(p, n))
70944+ return 0;
70945+ }
70946+
70947+ return 1;
70948+ }
70949+ case '[':
70950+ {
70951+ int not;
70952+ char cold;
70953+
70954+ if (*n == '\0' || *n == '/')
70955+ return 1;
70956+
70957+ not = (*p == '!' || *p == '^');
70958+ if (not)
70959+ ++p;
70960+
70961+ c = *p++;
70962+ for (;;) {
70963+ unsigned char fn = (unsigned char)*n;
70964+
70965+ if (c == '\0')
70966+ return 1;
70967+ else {
70968+ if (c == fn)
70969+ goto matched;
70970+ cold = c;
70971+ c = *p++;
70972+
70973+ if (c == '-' && *p != ']') {
70974+ unsigned char cend = *p++;
70975+
70976+ if (cend == '\0')
70977+ return 1;
70978+
70979+ if (cold <= fn && fn <= cend)
70980+ goto matched;
70981+
70982+ c = *p++;
70983+ }
70984+ }
70985+
70986+ if (c == ']')
70987+ break;
70988+ }
70989+ if (!not)
70990+ return 1;
70991+ break;
70992+ matched:
70993+ while (c != ']') {
70994+ if (c == '\0')
70995+ return 1;
70996+
70997+ c = *p++;
70998+ }
70999+ if (not)
71000+ return 1;
71001+ }
71002+ break;
71003+ default:
71004+ if (c != *n)
71005+ return 1;
71006+ }
71007+
71008+ ++n;
71009+ }
71010+
71011+ if (*n == '\0')
71012+ return 0;
71013+
71014+ if (*n == '/')
71015+ return 0;
71016+
71017+ return 1;
71018+}
71019+
71020+static struct acl_object_label *
71021+chk_glob_label(struct acl_object_label *globbed,
71022+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71023+{
71024+ struct acl_object_label *tmp;
71025+
71026+ if (*path == NULL)
71027+ *path = gr_to_filename_nolock(dentry, mnt);
71028+
71029+ tmp = globbed;
71030+
71031+ while (tmp) {
71032+ if (!glob_match(tmp->filename, *path))
71033+ return tmp;
71034+ tmp = tmp->next;
71035+ }
71036+
71037+ return NULL;
71038+}
71039+
71040+static struct acl_object_label *
71041+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71042+ const ino_t curr_ino, const dev_t curr_dev,
71043+ const struct acl_subject_label *subj, char **path, const int checkglob)
71044+{
71045+ struct acl_subject_label *tmpsubj;
71046+ struct acl_object_label *retval;
71047+ struct acl_object_label *retval2;
71048+
71049+ tmpsubj = (struct acl_subject_label *) subj;
71050+ read_lock(&gr_inode_lock);
71051+ do {
71052+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71053+ if (retval) {
71054+ if (checkglob && retval->globbed) {
71055+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71056+ if (retval2)
71057+ retval = retval2;
71058+ }
71059+ break;
71060+ }
71061+ } while ((tmpsubj = tmpsubj->parent_subject));
71062+ read_unlock(&gr_inode_lock);
71063+
71064+ return retval;
71065+}
71066+
71067+static __inline__ struct acl_object_label *
71068+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71069+ struct dentry *curr_dentry,
71070+ const struct acl_subject_label *subj, char **path, const int checkglob)
71071+{
71072+ int newglob = checkglob;
71073+ ino_t inode;
71074+ dev_t device;
71075+
71076+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
71077+ as we don't want a / * rule to match instead of the / object
71078+ don't do this for create lookups that call this function though, since they're looking up
71079+ on the parent and thus need globbing checks on all paths
71080+ */
71081+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71082+ newglob = GR_NO_GLOB;
71083+
71084+ spin_lock(&curr_dentry->d_lock);
71085+ inode = curr_dentry->d_inode->i_ino;
71086+ device = __get_dev(curr_dentry);
71087+ spin_unlock(&curr_dentry->d_lock);
71088+
71089+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71090+}
71091+
71092+#ifdef CONFIG_HUGETLBFS
71093+static inline bool
71094+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71095+{
71096+ int i;
71097+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71098+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71099+ return true;
71100+ }
71101+
71102+ return false;
71103+}
71104+#endif
71105+
71106+static struct acl_object_label *
71107+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71108+ const struct acl_subject_label *subj, char *path, const int checkglob)
71109+{
71110+ struct dentry *dentry = (struct dentry *) l_dentry;
71111+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71112+ struct mount *real_mnt = real_mount(mnt);
71113+ struct acl_object_label *retval;
71114+ struct dentry *parent;
71115+
71116+ read_seqlock_excl(&mount_lock);
71117+ write_seqlock(&rename_lock);
71118+
71119+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71120+#ifdef CONFIG_NET
71121+ mnt == sock_mnt ||
71122+#endif
71123+#ifdef CONFIG_HUGETLBFS
71124+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71125+#endif
71126+ /* ignore Eric Biederman */
71127+ IS_PRIVATE(l_dentry->d_inode))) {
71128+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71129+ goto out;
71130+ }
71131+
71132+ for (;;) {
71133+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71134+ break;
71135+
71136+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71137+ if (!mnt_has_parent(real_mnt))
71138+ break;
71139+
71140+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71141+ if (retval != NULL)
71142+ goto out;
71143+
71144+ dentry = real_mnt->mnt_mountpoint;
71145+ real_mnt = real_mnt->mnt_parent;
71146+ mnt = &real_mnt->mnt;
71147+ continue;
71148+ }
71149+
71150+ parent = dentry->d_parent;
71151+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71152+ if (retval != NULL)
71153+ goto out;
71154+
71155+ dentry = parent;
71156+ }
71157+
71158+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71159+
71160+ /* gr_real_root is pinned so we don't have to hold a reference */
71161+ if (retval == NULL)
71162+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71163+out:
71164+ write_sequnlock(&rename_lock);
71165+ read_sequnlock_excl(&mount_lock);
71166+
71167+ BUG_ON(retval == NULL);
71168+
71169+ return retval;
71170+}
71171+
71172+static __inline__ struct acl_object_label *
71173+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71174+ const struct acl_subject_label *subj)
71175+{
71176+ char *path = NULL;
71177+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71178+}
71179+
71180+static __inline__ struct acl_object_label *
71181+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71182+ const struct acl_subject_label *subj)
71183+{
71184+ char *path = NULL;
71185+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71186+}
71187+
71188+static __inline__ struct acl_object_label *
71189+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71190+ const struct acl_subject_label *subj, char *path)
71191+{
71192+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71193+}
71194+
71195+struct acl_subject_label *
71196+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71197+ const struct acl_role_label *role)
71198+{
71199+ struct dentry *dentry = (struct dentry *) l_dentry;
71200+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71201+ struct mount *real_mnt = real_mount(mnt);
71202+ struct acl_subject_label *retval;
71203+ struct dentry *parent;
71204+
71205+ read_seqlock_excl(&mount_lock);
71206+ write_seqlock(&rename_lock);
71207+
71208+ for (;;) {
71209+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71210+ break;
71211+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71212+ if (!mnt_has_parent(real_mnt))
71213+ break;
71214+
71215+ spin_lock(&dentry->d_lock);
71216+ read_lock(&gr_inode_lock);
71217+ retval =
71218+ lookup_acl_subj_label(dentry->d_inode->i_ino,
71219+ __get_dev(dentry), role);
71220+ read_unlock(&gr_inode_lock);
71221+ spin_unlock(&dentry->d_lock);
71222+ if (retval != NULL)
71223+ goto out;
71224+
71225+ dentry = real_mnt->mnt_mountpoint;
71226+ real_mnt = real_mnt->mnt_parent;
71227+ mnt = &real_mnt->mnt;
71228+ continue;
71229+ }
71230+
71231+ spin_lock(&dentry->d_lock);
71232+ read_lock(&gr_inode_lock);
71233+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71234+ __get_dev(dentry), role);
71235+ read_unlock(&gr_inode_lock);
71236+ parent = dentry->d_parent;
71237+ spin_unlock(&dentry->d_lock);
71238+
71239+ if (retval != NULL)
71240+ goto out;
71241+
71242+ dentry = parent;
71243+ }
71244+
71245+ spin_lock(&dentry->d_lock);
71246+ read_lock(&gr_inode_lock);
71247+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71248+ __get_dev(dentry), role);
71249+ read_unlock(&gr_inode_lock);
71250+ spin_unlock(&dentry->d_lock);
71251+
71252+ if (unlikely(retval == NULL)) {
71253+ /* gr_real_root is pinned, we don't need to hold a reference */
71254+ read_lock(&gr_inode_lock);
71255+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
71256+ __get_dev(gr_real_root.dentry), role);
71257+ read_unlock(&gr_inode_lock);
71258+ }
71259+out:
71260+ write_sequnlock(&rename_lock);
71261+ read_sequnlock_excl(&mount_lock);
71262+
71263+ BUG_ON(retval == NULL);
71264+
71265+ return retval;
71266+}
71267+
71268+void
71269+assign_special_role(const char *rolename)
71270+{
71271+ struct acl_object_label *obj;
71272+ struct acl_role_label *r;
71273+ struct acl_role_label *assigned = NULL;
71274+ struct task_struct *tsk;
71275+ struct file *filp;
71276+
71277+ FOR_EACH_ROLE_START(r)
71278+ if (!strcmp(rolename, r->rolename) &&
71279+ (r->roletype & GR_ROLE_SPECIAL)) {
71280+ assigned = r;
71281+ break;
71282+ }
71283+ FOR_EACH_ROLE_END(r)
71284+
71285+ if (!assigned)
71286+ return;
71287+
71288+ read_lock(&tasklist_lock);
71289+ read_lock(&grsec_exec_file_lock);
71290+
71291+ tsk = current->real_parent;
71292+ if (tsk == NULL)
71293+ goto out_unlock;
71294+
71295+ filp = tsk->exec_file;
71296+ if (filp == NULL)
71297+ goto out_unlock;
71298+
71299+ tsk->is_writable = 0;
71300+ tsk->inherited = 0;
71301+
71302+ tsk->acl_sp_role = 1;
71303+ tsk->acl_role_id = ++acl_sp_role_value;
71304+ tsk->role = assigned;
71305+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71306+
71307+ /* ignore additional mmap checks for processes that are writable
71308+ by the default ACL */
71309+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71310+ if (unlikely(obj->mode & GR_WRITE))
71311+ tsk->is_writable = 1;
71312+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71313+ if (unlikely(obj->mode & GR_WRITE))
71314+ tsk->is_writable = 1;
71315+
71316+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71317+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71318+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71319+#endif
71320+
71321+out_unlock:
71322+ read_unlock(&grsec_exec_file_lock);
71323+ read_unlock(&tasklist_lock);
71324+ return;
71325+}
71326+
71327+
71328+static void
71329+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71330+{
71331+ struct task_struct *task = current;
71332+ const struct cred *cred = current_cred();
71333+
71334+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71335+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71336+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71337+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71338+
71339+ return;
71340+}
71341+
71342+static void
71343+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71344+{
71345+ struct task_struct *task = current;
71346+ const struct cred *cred = current_cred();
71347+
71348+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71349+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71350+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71351+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71352+
71353+ return;
71354+}
71355+
71356+static void
71357+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71358+{
71359+ struct task_struct *task = current;
71360+ const struct cred *cred = current_cred();
71361+
71362+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71363+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71364+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71365+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71366+
71367+ return;
71368+}
71369+
71370+static void
71371+gr_set_proc_res(struct task_struct *task)
71372+{
71373+ struct acl_subject_label *proc;
71374+ unsigned short i;
71375+
71376+ proc = task->acl;
71377+
71378+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71379+ return;
71380+
71381+ for (i = 0; i < RLIM_NLIMITS; i++) {
71382+ if (!(proc->resmask & (1U << i)))
71383+ continue;
71384+
71385+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
71386+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
71387+
71388+ if (i == RLIMIT_CPU)
71389+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
71390+ }
71391+
71392+ return;
71393+}
71394+
71395+/* both of the below must be called with
71396+ rcu_read_lock();
71397+ read_lock(&tasklist_lock);
71398+ read_lock(&grsec_exec_file_lock);
71399+*/
71400+
71401+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
71402+{
71403+ char *tmpname;
71404+ struct acl_subject_label *tmpsubj;
71405+ struct file *filp;
71406+ struct name_entry *nmatch;
71407+
71408+ filp = task->exec_file;
71409+ if (filp == NULL)
71410+ return NULL;
71411+
71412+ /* the following is to apply the correct subject
71413+ on binaries running when the RBAC system
71414+ is enabled, when the binaries have been
71415+ replaced or deleted since their execution
71416+ -----
71417+ when the RBAC system starts, the inode/dev
71418+ from exec_file will be one the RBAC system
71419+ is unaware of. It only knows the inode/dev
71420+ of the present file on disk, or the absence
71421+ of it.
71422+ */
71423+
71424+ if (filename)
71425+ nmatch = __lookup_name_entry(state, filename);
71426+ else {
71427+ preempt_disable();
71428+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71429+
71430+ nmatch = __lookup_name_entry(state, tmpname);
71431+ preempt_enable();
71432+ }
71433+ tmpsubj = NULL;
71434+ if (nmatch) {
71435+ if (nmatch->deleted)
71436+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71437+ else
71438+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71439+ }
71440+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71441+ then we fall back to a normal lookup based on the binary's ino/dev
71442+ */
71443+ if (tmpsubj == NULL)
71444+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71445+
71446+ return tmpsubj;
71447+}
71448+
71449+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
71450+{
71451+ return __gr_get_subject_for_task(&running_polstate, task, filename);
71452+}
71453+
71454+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71455+{
71456+ struct acl_object_label *obj;
71457+ struct file *filp;
71458+
71459+ filp = task->exec_file;
71460+
71461+ task->acl = subj;
71462+ task->is_writable = 0;
71463+ /* ignore additional mmap checks for processes that are writable
71464+ by the default ACL */
71465+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71466+ if (unlikely(obj->mode & GR_WRITE))
71467+ task->is_writable = 1;
71468+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71469+ if (unlikely(obj->mode & GR_WRITE))
71470+ task->is_writable = 1;
71471+
71472+ gr_set_proc_res(task);
71473+
71474+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71475+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71476+#endif
71477+}
71478+
71479+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71480+{
71481+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71482+}
71483+
71484+__u32
71485+gr_search_file(const struct dentry * dentry, const __u32 mode,
71486+ const struct vfsmount * mnt)
71487+{
71488+ __u32 retval = mode;
71489+ struct acl_subject_label *curracl;
71490+ struct acl_object_label *currobj;
71491+
71492+ if (unlikely(!(gr_status & GR_READY)))
71493+ return (mode & ~GR_AUDITS);
71494+
71495+ curracl = current->acl;
71496+
71497+ currobj = chk_obj_label(dentry, mnt, curracl);
71498+ retval = currobj->mode & mode;
71499+
71500+ /* if we're opening a specified transfer file for writing
71501+ (e.g. /dev/initctl), then transfer our role to init
71502+ */
71503+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71504+ current->role->roletype & GR_ROLE_PERSIST)) {
71505+ struct task_struct *task = init_pid_ns.child_reaper;
71506+
71507+ if (task->role != current->role) {
71508+ struct acl_subject_label *subj;
71509+
71510+ task->acl_sp_role = 0;
71511+ task->acl_role_id = current->acl_role_id;
71512+ task->role = current->role;
71513+ rcu_read_lock();
71514+ read_lock(&grsec_exec_file_lock);
71515+ subj = gr_get_subject_for_task(task, NULL);
71516+ gr_apply_subject_to_task(task, subj);
71517+ read_unlock(&grsec_exec_file_lock);
71518+ rcu_read_unlock();
71519+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71520+ }
71521+ }
71522+
71523+ if (unlikely
71524+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71525+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71526+ __u32 new_mode = mode;
71527+
71528+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71529+
71530+ retval = new_mode;
71531+
71532+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71533+ new_mode |= GR_INHERIT;
71534+
71535+ if (!(mode & GR_NOLEARN))
71536+ gr_log_learn(dentry, mnt, new_mode);
71537+ }
71538+
71539+ return retval;
71540+}
71541+
71542+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71543+ const struct dentry *parent,
71544+ const struct vfsmount *mnt)
71545+{
71546+ struct name_entry *match;
71547+ struct acl_object_label *matchpo;
71548+ struct acl_subject_label *curracl;
71549+ char *path;
71550+
71551+ if (unlikely(!(gr_status & GR_READY)))
71552+ return NULL;
71553+
71554+ preempt_disable();
71555+ path = gr_to_filename_rbac(new_dentry, mnt);
71556+ match = lookup_name_entry_create(path);
71557+
71558+ curracl = current->acl;
71559+
71560+ if (match) {
71561+ read_lock(&gr_inode_lock);
71562+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71563+ read_unlock(&gr_inode_lock);
71564+
71565+ if (matchpo) {
71566+ preempt_enable();
71567+ return matchpo;
71568+ }
71569+ }
71570+
71571+ // lookup parent
71572+
71573+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71574+
71575+ preempt_enable();
71576+ return matchpo;
71577+}
71578+
71579+__u32
71580+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71581+ const struct vfsmount * mnt, const __u32 mode)
71582+{
71583+ struct acl_object_label *matchpo;
71584+ __u32 retval;
71585+
71586+ if (unlikely(!(gr_status & GR_READY)))
71587+ return (mode & ~GR_AUDITS);
71588+
71589+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71590+
71591+ retval = matchpo->mode & mode;
71592+
71593+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71594+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71595+ __u32 new_mode = mode;
71596+
71597+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71598+
71599+ gr_log_learn(new_dentry, mnt, new_mode);
71600+ return new_mode;
71601+ }
71602+
71603+ return retval;
71604+}
71605+
71606+__u32
71607+gr_check_link(const struct dentry * new_dentry,
71608+ const struct dentry * parent_dentry,
71609+ const struct vfsmount * parent_mnt,
71610+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71611+{
71612+ struct acl_object_label *obj;
71613+ __u32 oldmode, newmode;
71614+ __u32 needmode;
71615+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71616+ GR_DELETE | GR_INHERIT;
71617+
71618+ if (unlikely(!(gr_status & GR_READY)))
71619+ return (GR_CREATE | GR_LINK);
71620+
71621+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71622+ oldmode = obj->mode;
71623+
71624+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71625+ newmode = obj->mode;
71626+
71627+ needmode = newmode & checkmodes;
71628+
71629+ // old name for hardlink must have at least the permissions of the new name
71630+ if ((oldmode & needmode) != needmode)
71631+ goto bad;
71632+
71633+ // if old name had restrictions/auditing, make sure the new name does as well
71634+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71635+
71636+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71637+ if (is_privileged_binary(old_dentry))
71638+ needmode |= GR_SETID;
71639+
71640+ if ((newmode & needmode) != needmode)
71641+ goto bad;
71642+
71643+ // enforce minimum permissions
71644+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
71645+ return newmode;
71646+bad:
71647+ needmode = oldmode;
71648+ if (is_privileged_binary(old_dentry))
71649+ needmode |= GR_SETID;
71650+
71651+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
71652+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
71653+ return (GR_CREATE | GR_LINK);
71654+ } else if (newmode & GR_SUPPRESS)
71655+ return GR_SUPPRESS;
71656+ else
71657+ return 0;
71658+}
71659+
71660+int
71661+gr_check_hidden_task(const struct task_struct *task)
71662+{
71663+ if (unlikely(!(gr_status & GR_READY)))
71664+ return 0;
71665+
71666+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
71667+ return 1;
71668+
71669+ return 0;
71670+}
71671+
71672+int
71673+gr_check_protected_task(const struct task_struct *task)
71674+{
71675+ if (unlikely(!(gr_status & GR_READY) || !task))
71676+ return 0;
71677+
71678+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71679+ task->acl != current->acl)
71680+ return 1;
71681+
71682+ return 0;
71683+}
71684+
71685+int
71686+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
71687+{
71688+ struct task_struct *p;
71689+ int ret = 0;
71690+
71691+ if (unlikely(!(gr_status & GR_READY) || !pid))
71692+ return ret;
71693+
71694+ read_lock(&tasklist_lock);
71695+ do_each_pid_task(pid, type, p) {
71696+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71697+ p->acl != current->acl) {
71698+ ret = 1;
71699+ goto out;
71700+ }
71701+ } while_each_pid_task(pid, type, p);
71702+out:
71703+ read_unlock(&tasklist_lock);
71704+
71705+ return ret;
71706+}
71707+
71708+void
71709+gr_copy_label(struct task_struct *tsk)
71710+{
71711+ struct task_struct *p = current;
71712+
71713+ tsk->inherited = p->inherited;
71714+ tsk->acl_sp_role = 0;
71715+ tsk->acl_role_id = p->acl_role_id;
71716+ tsk->acl = p->acl;
71717+ tsk->role = p->role;
71718+ tsk->signal->used_accept = 0;
71719+ tsk->signal->curr_ip = p->signal->curr_ip;
71720+ tsk->signal->saved_ip = p->signal->saved_ip;
71721+ if (p->exec_file)
71722+ get_file(p->exec_file);
71723+ tsk->exec_file = p->exec_file;
71724+ tsk->is_writable = p->is_writable;
71725+ if (unlikely(p->signal->used_accept)) {
71726+ p->signal->curr_ip = 0;
71727+ p->signal->saved_ip = 0;
71728+ }
71729+
71730+ return;
71731+}
71732+
71733+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71734+
71735+int
71736+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71737+{
71738+ unsigned int i;
71739+ __u16 num;
71740+ uid_t *uidlist;
71741+ uid_t curuid;
71742+ int realok = 0;
71743+ int effectiveok = 0;
71744+ int fsok = 0;
71745+ uid_t globalreal, globaleffective, globalfs;
71746+
71747+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71748+ struct user_struct *user;
71749+
71750+ if (!uid_valid(real))
71751+ goto skipit;
71752+
71753+ /* find user based on global namespace */
71754+
71755+ globalreal = GR_GLOBAL_UID(real);
71756+
71757+ user = find_user(make_kuid(&init_user_ns, globalreal));
71758+ if (user == NULL)
71759+ goto skipit;
71760+
71761+ if (gr_process_kernel_setuid_ban(user)) {
71762+ /* for find_user */
71763+ free_uid(user);
71764+ return 1;
71765+ }
71766+
71767+ /* for find_user */
71768+ free_uid(user);
71769+
71770+skipit:
71771+#endif
71772+
71773+ if (unlikely(!(gr_status & GR_READY)))
71774+ return 0;
71775+
71776+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71777+ gr_log_learn_uid_change(real, effective, fs);
71778+
71779+ num = current->acl->user_trans_num;
71780+ uidlist = current->acl->user_transitions;
71781+
71782+ if (uidlist == NULL)
71783+ return 0;
71784+
71785+ if (!uid_valid(real)) {
71786+ realok = 1;
71787+ globalreal = (uid_t)-1;
71788+ } else {
71789+ globalreal = GR_GLOBAL_UID(real);
71790+ }
71791+ if (!uid_valid(effective)) {
71792+ effectiveok = 1;
71793+ globaleffective = (uid_t)-1;
71794+ } else {
71795+ globaleffective = GR_GLOBAL_UID(effective);
71796+ }
71797+ if (!uid_valid(fs)) {
71798+ fsok = 1;
71799+ globalfs = (uid_t)-1;
71800+ } else {
71801+ globalfs = GR_GLOBAL_UID(fs);
71802+ }
71803+
71804+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71805+ for (i = 0; i < num; i++) {
71806+ curuid = uidlist[i];
71807+ if (globalreal == curuid)
71808+ realok = 1;
71809+ if (globaleffective == curuid)
71810+ effectiveok = 1;
71811+ if (globalfs == curuid)
71812+ fsok = 1;
71813+ }
71814+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71815+ for (i = 0; i < num; i++) {
71816+ curuid = uidlist[i];
71817+ if (globalreal == curuid)
71818+ break;
71819+ if (globaleffective == curuid)
71820+ break;
71821+ if (globalfs == curuid)
71822+ break;
71823+ }
71824+ /* not in deny list */
71825+ if (i == num) {
71826+ realok = 1;
71827+ effectiveok = 1;
71828+ fsok = 1;
71829+ }
71830+ }
71831+
71832+ if (realok && effectiveok && fsok)
71833+ return 0;
71834+ else {
71835+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71836+ return 1;
71837+ }
71838+}
71839+
71840+int
71841+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71842+{
71843+ unsigned int i;
71844+ __u16 num;
71845+ gid_t *gidlist;
71846+ gid_t curgid;
71847+ int realok = 0;
71848+ int effectiveok = 0;
71849+ int fsok = 0;
71850+ gid_t globalreal, globaleffective, globalfs;
71851+
71852+ if (unlikely(!(gr_status & GR_READY)))
71853+ return 0;
71854+
71855+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71856+ gr_log_learn_gid_change(real, effective, fs);
71857+
71858+ num = current->acl->group_trans_num;
71859+ gidlist = current->acl->group_transitions;
71860+
71861+ if (gidlist == NULL)
71862+ return 0;
71863+
71864+ if (!gid_valid(real)) {
71865+ realok = 1;
71866+ globalreal = (gid_t)-1;
71867+ } else {
71868+ globalreal = GR_GLOBAL_GID(real);
71869+ }
71870+ if (!gid_valid(effective)) {
71871+ effectiveok = 1;
71872+ globaleffective = (gid_t)-1;
71873+ } else {
71874+ globaleffective = GR_GLOBAL_GID(effective);
71875+ }
71876+ if (!gid_valid(fs)) {
71877+ fsok = 1;
71878+ globalfs = (gid_t)-1;
71879+ } else {
71880+ globalfs = GR_GLOBAL_GID(fs);
71881+ }
71882+
71883+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71884+ for (i = 0; i < num; i++) {
71885+ curgid = gidlist[i];
71886+ if (globalreal == curgid)
71887+ realok = 1;
71888+ if (globaleffective == curgid)
71889+ effectiveok = 1;
71890+ if (globalfs == curgid)
71891+ fsok = 1;
71892+ }
71893+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71894+ for (i = 0; i < num; i++) {
71895+ curgid = gidlist[i];
71896+ if (globalreal == curgid)
71897+ break;
71898+ if (globaleffective == curgid)
71899+ break;
71900+ if (globalfs == curgid)
71901+ break;
71902+ }
71903+ /* not in deny list */
71904+ if (i == num) {
71905+ realok = 1;
71906+ effectiveok = 1;
71907+ fsok = 1;
71908+ }
71909+ }
71910+
71911+ if (realok && effectiveok && fsok)
71912+ return 0;
71913+ else {
71914+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71915+ return 1;
71916+ }
71917+}
71918+
71919+extern int gr_acl_is_capable(const int cap);
71920+
71921+void
71922+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71923+{
71924+ struct acl_role_label *role = task->role;
71925+ struct acl_subject_label *subj = NULL;
71926+ struct acl_object_label *obj;
71927+ struct file *filp;
71928+ uid_t uid;
71929+ gid_t gid;
71930+
71931+ if (unlikely(!(gr_status & GR_READY)))
71932+ return;
71933+
71934+ uid = GR_GLOBAL_UID(kuid);
71935+ gid = GR_GLOBAL_GID(kgid);
71936+
71937+ filp = task->exec_file;
71938+
71939+ /* kernel process, we'll give them the kernel role */
71940+ if (unlikely(!filp)) {
71941+ task->role = running_polstate.kernel_role;
71942+ task->acl = running_polstate.kernel_role->root_label;
71943+ return;
71944+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71945+ /* save the current ip at time of role lookup so that the proper
71946+ IP will be learned for role_allowed_ip */
71947+ task->signal->saved_ip = task->signal->curr_ip;
71948+ role = lookup_acl_role_label(task, uid, gid);
71949+ }
71950+
71951+ /* don't change the role if we're not a privileged process */
71952+ if (role && task->role != role &&
71953+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71954+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71955+ return;
71956+
71957+ /* perform subject lookup in possibly new role
71958+ we can use this result below in the case where role == task->role
71959+ */
71960+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71961+
71962+ /* if we changed uid/gid, but result in the same role
71963+ and are using inheritance, don't lose the inherited subject
71964+ if current subject is other than what normal lookup
71965+ would result in, we arrived via inheritance, don't
71966+ lose subject
71967+ */
71968+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
71969+ (subj == task->acl)))
71970+ task->acl = subj;
71971+
71972+ /* leave task->inherited unaffected */
71973+
71974+ task->role = role;
71975+
71976+ task->is_writable = 0;
71977+
71978+ /* ignore additional mmap checks for processes that are writable
71979+ by the default ACL */
71980+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71981+ if (unlikely(obj->mode & GR_WRITE))
71982+ task->is_writable = 1;
71983+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71984+ if (unlikely(obj->mode & GR_WRITE))
71985+ task->is_writable = 1;
71986+
71987+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71988+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71989+#endif
71990+
71991+ gr_set_proc_res(task);
71992+
71993+ return;
71994+}
71995+
71996+int
71997+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71998+ const int unsafe_flags)
71999+{
72000+ struct task_struct *task = current;
72001+ struct acl_subject_label *newacl;
72002+ struct acl_object_label *obj;
72003+ __u32 retmode;
72004+
72005+ if (unlikely(!(gr_status & GR_READY)))
72006+ return 0;
72007+
72008+ newacl = chk_subj_label(dentry, mnt, task->role);
72009+
72010+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
72011+ did an exec
72012+ */
72013+ rcu_read_lock();
72014+ read_lock(&tasklist_lock);
72015+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72016+ (task->parent->acl->mode & GR_POVERRIDE))) {
72017+ read_unlock(&tasklist_lock);
72018+ rcu_read_unlock();
72019+ goto skip_check;
72020+ }
72021+ read_unlock(&tasklist_lock);
72022+ rcu_read_unlock();
72023+
72024+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72025+ !(task->role->roletype & GR_ROLE_GOD) &&
72026+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72027+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72028+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72029+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72030+ else
72031+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72032+ return -EACCES;
72033+ }
72034+
72035+skip_check:
72036+
72037+ obj = chk_obj_label(dentry, mnt, task->acl);
72038+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72039+
72040+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72041+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72042+ if (obj->nested)
72043+ task->acl = obj->nested;
72044+ else
72045+ task->acl = newacl;
72046+ task->inherited = 0;
72047+ } else {
72048+ task->inherited = 1;
72049+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72050+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72051+ }
72052+
72053+ task->is_writable = 0;
72054+
72055+ /* ignore additional mmap checks for processes that are writable
72056+ by the default ACL */
72057+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72058+ if (unlikely(obj->mode & GR_WRITE))
72059+ task->is_writable = 1;
72060+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72061+ if (unlikely(obj->mode & GR_WRITE))
72062+ task->is_writable = 1;
72063+
72064+ gr_set_proc_res(task);
72065+
72066+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72067+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72068+#endif
72069+ return 0;
72070+}
72071+
72072+/* always called with valid inodev ptr */
72073+static void
72074+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72075+{
72076+ struct acl_object_label *matchpo;
72077+ struct acl_subject_label *matchps;
72078+ struct acl_subject_label *subj;
72079+ struct acl_role_label *role;
72080+ unsigned int x;
72081+
72082+ FOR_EACH_ROLE_START(role)
72083+ FOR_EACH_SUBJECT_START(role, subj, x)
72084+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72085+ matchpo->mode |= GR_DELETED;
72086+ FOR_EACH_SUBJECT_END(subj,x)
72087+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72088+ /* nested subjects aren't in the role's subj_hash table */
72089+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72090+ matchpo->mode |= GR_DELETED;
72091+ FOR_EACH_NESTED_SUBJECT_END(subj)
72092+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72093+ matchps->mode |= GR_DELETED;
72094+ FOR_EACH_ROLE_END(role)
72095+
72096+ inodev->nentry->deleted = 1;
72097+
72098+ return;
72099+}
72100+
72101+void
72102+gr_handle_delete(const ino_t ino, const dev_t dev)
72103+{
72104+ struct inodev_entry *inodev;
72105+
72106+ if (unlikely(!(gr_status & GR_READY)))
72107+ return;
72108+
72109+ write_lock(&gr_inode_lock);
72110+ inodev = lookup_inodev_entry(ino, dev);
72111+ if (inodev != NULL)
72112+ do_handle_delete(inodev, ino, dev);
72113+ write_unlock(&gr_inode_lock);
72114+
72115+ return;
72116+}
72117+
72118+static void
72119+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72120+ const ino_t newinode, const dev_t newdevice,
72121+ struct acl_subject_label *subj)
72122+{
72123+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72124+ struct acl_object_label *match;
72125+
72126+ match = subj->obj_hash[index];
72127+
72128+ while (match && (match->inode != oldinode ||
72129+ match->device != olddevice ||
72130+ !(match->mode & GR_DELETED)))
72131+ match = match->next;
72132+
72133+ if (match && (match->inode == oldinode)
72134+ && (match->device == olddevice)
72135+ && (match->mode & GR_DELETED)) {
72136+ if (match->prev == NULL) {
72137+ subj->obj_hash[index] = match->next;
72138+ if (match->next != NULL)
72139+ match->next->prev = NULL;
72140+ } else {
72141+ match->prev->next = match->next;
72142+ if (match->next != NULL)
72143+ match->next->prev = match->prev;
72144+ }
72145+ match->prev = NULL;
72146+ match->next = NULL;
72147+ match->inode = newinode;
72148+ match->device = newdevice;
72149+ match->mode &= ~GR_DELETED;
72150+
72151+ insert_acl_obj_label(match, subj);
72152+ }
72153+
72154+ return;
72155+}
72156+
72157+static void
72158+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
72159+ const ino_t newinode, const dev_t newdevice,
72160+ struct acl_role_label *role)
72161+{
72162+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72163+ struct acl_subject_label *match;
72164+
72165+ match = role->subj_hash[index];
72166+
72167+ while (match && (match->inode != oldinode ||
72168+ match->device != olddevice ||
72169+ !(match->mode & GR_DELETED)))
72170+ match = match->next;
72171+
72172+ if (match && (match->inode == oldinode)
72173+ && (match->device == olddevice)
72174+ && (match->mode & GR_DELETED)) {
72175+ if (match->prev == NULL) {
72176+ role->subj_hash[index] = match->next;
72177+ if (match->next != NULL)
72178+ match->next->prev = NULL;
72179+ } else {
72180+ match->prev->next = match->next;
72181+ if (match->next != NULL)
72182+ match->next->prev = match->prev;
72183+ }
72184+ match->prev = NULL;
72185+ match->next = NULL;
72186+ match->inode = newinode;
72187+ match->device = newdevice;
72188+ match->mode &= ~GR_DELETED;
72189+
72190+ insert_acl_subj_label(match, role);
72191+ }
72192+
72193+ return;
72194+}
72195+
72196+static void
72197+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
72198+ const ino_t newinode, const dev_t newdevice)
72199+{
72200+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72201+ struct inodev_entry *match;
72202+
72203+ match = running_polstate.inodev_set.i_hash[index];
72204+
72205+ while (match && (match->nentry->inode != oldinode ||
72206+ match->nentry->device != olddevice || !match->nentry->deleted))
72207+ match = match->next;
72208+
72209+ if (match && (match->nentry->inode == oldinode)
72210+ && (match->nentry->device == olddevice) &&
72211+ match->nentry->deleted) {
72212+ if (match->prev == NULL) {
72213+ running_polstate.inodev_set.i_hash[index] = match->next;
72214+ if (match->next != NULL)
72215+ match->next->prev = NULL;
72216+ } else {
72217+ match->prev->next = match->next;
72218+ if (match->next != NULL)
72219+ match->next->prev = match->prev;
72220+ }
72221+ match->prev = NULL;
72222+ match->next = NULL;
72223+ match->nentry->inode = newinode;
72224+ match->nentry->device = newdevice;
72225+ match->nentry->deleted = 0;
72226+
72227+ insert_inodev_entry(match);
72228+ }
72229+
72230+ return;
72231+}
72232+
72233+static void
72234+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
72235+{
72236+ struct acl_subject_label *subj;
72237+ struct acl_role_label *role;
72238+ unsigned int x;
72239+
72240+ FOR_EACH_ROLE_START(role)
72241+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72242+
72243+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72244+ if ((subj->inode == ino) && (subj->device == dev)) {
72245+ subj->inode = ino;
72246+ subj->device = dev;
72247+ }
72248+ /* nested subjects aren't in the role's subj_hash table */
72249+ update_acl_obj_label(matchn->inode, matchn->device,
72250+ ino, dev, subj);
72251+ FOR_EACH_NESTED_SUBJECT_END(subj)
72252+ FOR_EACH_SUBJECT_START(role, subj, x)
72253+ update_acl_obj_label(matchn->inode, matchn->device,
72254+ ino, dev, subj);
72255+ FOR_EACH_SUBJECT_END(subj,x)
72256+ FOR_EACH_ROLE_END(role)
72257+
72258+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72259+
72260+ return;
72261+}
72262+
72263+static void
72264+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72265+ const struct vfsmount *mnt)
72266+{
72267+ ino_t ino = dentry->d_inode->i_ino;
72268+ dev_t dev = __get_dev(dentry);
72269+
72270+ __do_handle_create(matchn, ino, dev);
72271+
72272+ return;
72273+}
72274+
72275+void
72276+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72277+{
72278+ struct name_entry *matchn;
72279+
72280+ if (unlikely(!(gr_status & GR_READY)))
72281+ return;
72282+
72283+ preempt_disable();
72284+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72285+
72286+ if (unlikely((unsigned long)matchn)) {
72287+ write_lock(&gr_inode_lock);
72288+ do_handle_create(matchn, dentry, mnt);
72289+ write_unlock(&gr_inode_lock);
72290+ }
72291+ preempt_enable();
72292+
72293+ return;
72294+}
72295+
72296+void
72297+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72298+{
72299+ struct name_entry *matchn;
72300+
72301+ if (unlikely(!(gr_status & GR_READY)))
72302+ return;
72303+
72304+ preempt_disable();
72305+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72306+
72307+ if (unlikely((unsigned long)matchn)) {
72308+ write_lock(&gr_inode_lock);
72309+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72310+ write_unlock(&gr_inode_lock);
72311+ }
72312+ preempt_enable();
72313+
72314+ return;
72315+}
72316+
72317+void
72318+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72319+ struct dentry *old_dentry,
72320+ struct dentry *new_dentry,
72321+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72322+{
72323+ struct name_entry *matchn;
72324+ struct name_entry *matchn2 = NULL;
72325+ struct inodev_entry *inodev;
72326+ struct inode *inode = new_dentry->d_inode;
72327+ ino_t old_ino = old_dentry->d_inode->i_ino;
72328+ dev_t old_dev = __get_dev(old_dentry);
72329+ unsigned int exchange = flags & RENAME_EXCHANGE;
72330+
72331+ /* vfs_rename swaps the name and parent link for old_dentry and
72332+ new_dentry
72333+ at this point, old_dentry has the new name, parent link, and inode
72334+ for the renamed file
72335+ if a file is being replaced by a rename, new_dentry has the inode
72336+ and name for the replaced file
72337+ */
72338+
72339+ if (unlikely(!(gr_status & GR_READY)))
72340+ return;
72341+
72342+ preempt_disable();
72343+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72344+
72345+ /* exchange cases:
72346+ a filename exists for the source, but not dest
72347+ do a recreate on source
72348+ a filename exists for the dest, but not source
72349+ do a recreate on dest
72350+ a filename exists for both source and dest
72351+ delete source and dest, then create source and dest
72352+ a filename exists for neither source nor dest
72353+ no updates needed
72354+
72355+ the name entry lookups get us the old inode/dev associated with
72356+ each name, so do the deletes first (if possible) so that when
72357+ we do the create, we pick up on the right entries
72358+ */
72359+
72360+ if (exchange)
72361+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72362+
72363+ /* we wouldn't have to check d_inode if it weren't for
72364+ NFS silly-renaming
72365+ */
72366+
72367+ write_lock(&gr_inode_lock);
72368+ if (unlikely((replace || exchange) && inode)) {
72369+ ino_t new_ino = inode->i_ino;
72370+ dev_t new_dev = __get_dev(new_dentry);
72371+
72372+ inodev = lookup_inodev_entry(new_ino, new_dev);
72373+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72374+ do_handle_delete(inodev, new_ino, new_dev);
72375+ }
72376+
72377+ inodev = lookup_inodev_entry(old_ino, old_dev);
72378+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72379+ do_handle_delete(inodev, old_ino, old_dev);
72380+
72381+ if (unlikely(matchn != NULL))
72382+ do_handle_create(matchn, old_dentry, mnt);
72383+
72384+ if (unlikely(matchn2 != NULL))
72385+ do_handle_create(matchn2, new_dentry, mnt);
72386+
72387+ write_unlock(&gr_inode_lock);
72388+ preempt_enable();
72389+
72390+ return;
72391+}
72392+
72393+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
72394+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72395+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72396+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72397+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72398+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72399+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72400+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72401+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72402+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72403+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72404+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72405+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72406+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72407+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72408+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72409+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72410+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72411+};
72412+
72413+void
72414+gr_learn_resource(const struct task_struct *task,
72415+ const int res, const unsigned long wanted, const int gt)
72416+{
72417+ struct acl_subject_label *acl;
72418+ const struct cred *cred;
72419+
72420+ if (unlikely((gr_status & GR_READY) &&
72421+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72422+ goto skip_reslog;
72423+
72424+ gr_log_resource(task, res, wanted, gt);
72425+skip_reslog:
72426+
72427+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72428+ return;
72429+
72430+ acl = task->acl;
72431+
72432+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72433+ !(acl->resmask & (1U << (unsigned short) res))))
72434+ return;
72435+
72436+ if (wanted >= acl->res[res].rlim_cur) {
72437+ unsigned long res_add;
72438+
72439+ res_add = wanted + res_learn_bumps[res];
72440+
72441+ acl->res[res].rlim_cur = res_add;
72442+
72443+ if (wanted > acl->res[res].rlim_max)
72444+ acl->res[res].rlim_max = res_add;
72445+
72446+ /* only log the subject filename, since resource logging is supported for
72447+ single-subject learning only */
72448+ rcu_read_lock();
72449+ cred = __task_cred(task);
72450+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72451+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72452+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72453+ "", (unsigned long) res, &task->signal->saved_ip);
72454+ rcu_read_unlock();
72455+ }
72456+
72457+ return;
72458+}
72459+EXPORT_SYMBOL_GPL(gr_learn_resource);
72460+#endif
72461+
72462+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
72463+void
72464+pax_set_initial_flags(struct linux_binprm *bprm)
72465+{
72466+ struct task_struct *task = current;
72467+ struct acl_subject_label *proc;
72468+ unsigned long flags;
72469+
72470+ if (unlikely(!(gr_status & GR_READY)))
72471+ return;
72472+
72473+ flags = pax_get_flags(task);
72474+
72475+ proc = task->acl;
72476+
72477+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72478+ flags &= ~MF_PAX_PAGEEXEC;
72479+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72480+ flags &= ~MF_PAX_SEGMEXEC;
72481+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72482+ flags &= ~MF_PAX_RANDMMAP;
72483+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72484+ flags &= ~MF_PAX_EMUTRAMP;
72485+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72486+ flags &= ~MF_PAX_MPROTECT;
72487+
72488+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72489+ flags |= MF_PAX_PAGEEXEC;
72490+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72491+ flags |= MF_PAX_SEGMEXEC;
72492+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72493+ flags |= MF_PAX_RANDMMAP;
72494+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72495+ flags |= MF_PAX_EMUTRAMP;
72496+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72497+ flags |= MF_PAX_MPROTECT;
72498+
72499+ pax_set_flags(task, flags);
72500+
72501+ return;
72502+}
72503+#endif
72504+
72505+int
72506+gr_handle_proc_ptrace(struct task_struct *task)
72507+{
72508+ struct file *filp;
72509+ struct task_struct *tmp = task;
72510+ struct task_struct *curtemp = current;
72511+ __u32 retmode;
72512+
72513+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72514+ if (unlikely(!(gr_status & GR_READY)))
72515+ return 0;
72516+#endif
72517+
72518+ read_lock(&tasklist_lock);
72519+ read_lock(&grsec_exec_file_lock);
72520+ filp = task->exec_file;
72521+
72522+ while (task_pid_nr(tmp) > 0) {
72523+ if (tmp == curtemp)
72524+ break;
72525+ tmp = tmp->real_parent;
72526+ }
72527+
72528+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72529+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72530+ read_unlock(&grsec_exec_file_lock);
72531+ read_unlock(&tasklist_lock);
72532+ return 1;
72533+ }
72534+
72535+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72536+ if (!(gr_status & GR_READY)) {
72537+ read_unlock(&grsec_exec_file_lock);
72538+ read_unlock(&tasklist_lock);
72539+ return 0;
72540+ }
72541+#endif
72542+
72543+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72544+ read_unlock(&grsec_exec_file_lock);
72545+ read_unlock(&tasklist_lock);
72546+
72547+ if (retmode & GR_NOPTRACE)
72548+ return 1;
72549+
72550+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72551+ && (current->acl != task->acl || (current->acl != current->role->root_label
72552+ && task_pid_nr(current) != task_pid_nr(task))))
72553+ return 1;
72554+
72555+ return 0;
72556+}
72557+
72558+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72559+{
72560+ if (unlikely(!(gr_status & GR_READY)))
72561+ return;
72562+
72563+ if (!(current->role->roletype & GR_ROLE_GOD))
72564+ return;
72565+
72566+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72567+ p->role->rolename, gr_task_roletype_to_char(p),
72568+ p->acl->filename);
72569+}
72570+
72571+int
72572+gr_handle_ptrace(struct task_struct *task, const long request)
72573+{
72574+ struct task_struct *tmp = task;
72575+ struct task_struct *curtemp = current;
72576+ __u32 retmode;
72577+
72578+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72579+ if (unlikely(!(gr_status & GR_READY)))
72580+ return 0;
72581+#endif
72582+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72583+ read_lock(&tasklist_lock);
72584+ while (task_pid_nr(tmp) > 0) {
72585+ if (tmp == curtemp)
72586+ break;
72587+ tmp = tmp->real_parent;
72588+ }
72589+
72590+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72591+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72592+ read_unlock(&tasklist_lock);
72593+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72594+ return 1;
72595+ }
72596+ read_unlock(&tasklist_lock);
72597+ }
72598+
72599+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72600+ if (!(gr_status & GR_READY))
72601+ return 0;
72602+#endif
72603+
72604+ read_lock(&grsec_exec_file_lock);
72605+ if (unlikely(!task->exec_file)) {
72606+ read_unlock(&grsec_exec_file_lock);
72607+ return 0;
72608+ }
72609+
72610+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72611+ read_unlock(&grsec_exec_file_lock);
72612+
72613+ if (retmode & GR_NOPTRACE) {
72614+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72615+ return 1;
72616+ }
72617+
72618+ if (retmode & GR_PTRACERD) {
72619+ switch (request) {
72620+ case PTRACE_SEIZE:
72621+ case PTRACE_POKETEXT:
72622+ case PTRACE_POKEDATA:
72623+ case PTRACE_POKEUSR:
72624+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72625+ case PTRACE_SETREGS:
72626+ case PTRACE_SETFPREGS:
72627+#endif
72628+#ifdef CONFIG_X86
72629+ case PTRACE_SETFPXREGS:
72630+#endif
72631+#ifdef CONFIG_ALTIVEC
72632+ case PTRACE_SETVRREGS:
72633+#endif
72634+ return 1;
72635+ default:
72636+ return 0;
72637+ }
72638+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72639+ !(current->role->roletype & GR_ROLE_GOD) &&
72640+ (current->acl != task->acl)) {
72641+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72642+ return 1;
72643+ }
72644+
72645+ return 0;
72646+}
72647+
72648+static int is_writable_mmap(const struct file *filp)
72649+{
72650+ struct task_struct *task = current;
72651+ struct acl_object_label *obj, *obj2;
72652+
72653+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
72654+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
72655+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72656+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
72657+ task->role->root_label);
72658+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
72659+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
72660+ return 1;
72661+ }
72662+ }
72663+ return 0;
72664+}
72665+
72666+int
72667+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
72668+{
72669+ __u32 mode;
72670+
72671+ if (unlikely(!file || !(prot & PROT_EXEC)))
72672+ return 1;
72673+
72674+ if (is_writable_mmap(file))
72675+ return 0;
72676+
72677+ mode =
72678+ gr_search_file(file->f_path.dentry,
72679+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72680+ file->f_path.mnt);
72681+
72682+ if (!gr_tpe_allow(file))
72683+ return 0;
72684+
72685+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72686+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72687+ return 0;
72688+ } else if (unlikely(!(mode & GR_EXEC))) {
72689+ return 0;
72690+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72691+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72692+ return 1;
72693+ }
72694+
72695+ return 1;
72696+}
72697+
72698+int
72699+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72700+{
72701+ __u32 mode;
72702+
72703+ if (unlikely(!file || !(prot & PROT_EXEC)))
72704+ return 1;
72705+
72706+ if (is_writable_mmap(file))
72707+ return 0;
72708+
72709+ mode =
72710+ gr_search_file(file->f_path.dentry,
72711+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72712+ file->f_path.mnt);
72713+
72714+ if (!gr_tpe_allow(file))
72715+ return 0;
72716+
72717+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72718+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72719+ return 0;
72720+ } else if (unlikely(!(mode & GR_EXEC))) {
72721+ return 0;
72722+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72723+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72724+ return 1;
72725+ }
72726+
72727+ return 1;
72728+}
72729+
72730+void
72731+gr_acl_handle_psacct(struct task_struct *task, const long code)
72732+{
72733+ unsigned long runtime, cputime;
72734+ cputime_t utime, stime;
72735+ unsigned int wday, cday;
72736+ __u8 whr, chr;
72737+ __u8 wmin, cmin;
72738+ __u8 wsec, csec;
72739+ struct timespec curtime, starttime;
72740+
72741+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72742+ !(task->acl->mode & GR_PROCACCT)))
72743+ return;
72744+
72745+ curtime = ns_to_timespec(ktime_get_ns());
72746+ starttime = ns_to_timespec(task->start_time);
72747+ runtime = curtime.tv_sec - starttime.tv_sec;
72748+ wday = runtime / (60 * 60 * 24);
72749+ runtime -= wday * (60 * 60 * 24);
72750+ whr = runtime / (60 * 60);
72751+ runtime -= whr * (60 * 60);
72752+ wmin = runtime / 60;
72753+ runtime -= wmin * 60;
72754+ wsec = runtime;
72755+
72756+ task_cputime(task, &utime, &stime);
72757+ cputime = cputime_to_secs(utime + stime);
72758+ cday = cputime / (60 * 60 * 24);
72759+ cputime -= cday * (60 * 60 * 24);
72760+ chr = cputime / (60 * 60);
72761+ cputime -= chr * (60 * 60);
72762+ cmin = cputime / 60;
72763+ cputime -= cmin * 60;
72764+ csec = cputime;
72765+
72766+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72767+
72768+ return;
72769+}
72770+
72771+#ifdef CONFIG_TASKSTATS
72772+int gr_is_taskstats_denied(int pid)
72773+{
72774+ struct task_struct *task;
72775+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72776+ const struct cred *cred;
72777+#endif
72778+ int ret = 0;
72779+
72780+ /* restrict taskstats viewing to un-chrooted root users
72781+ who have the 'view' subject flag if the RBAC system is enabled
72782+ */
72783+
72784+ rcu_read_lock();
72785+ read_lock(&tasklist_lock);
72786+ task = find_task_by_vpid(pid);
72787+ if (task) {
72788+#ifdef CONFIG_GRKERNSEC_CHROOT
72789+ if (proc_is_chrooted(task))
72790+ ret = -EACCES;
72791+#endif
72792+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72793+ cred = __task_cred(task);
72794+#ifdef CONFIG_GRKERNSEC_PROC_USER
72795+ if (gr_is_global_nonroot(cred->uid))
72796+ ret = -EACCES;
72797+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72798+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72799+ ret = -EACCES;
72800+#endif
72801+#endif
72802+ if (gr_status & GR_READY) {
72803+ if (!(task->acl->mode & GR_VIEW))
72804+ ret = -EACCES;
72805+ }
72806+ } else
72807+ ret = -ENOENT;
72808+
72809+ read_unlock(&tasklist_lock);
72810+ rcu_read_unlock();
72811+
72812+ return ret;
72813+}
72814+#endif
72815+
72816+/* AUXV entries are filled via a descendant of search_binary_handler
72817+ after we've already applied the subject for the target
72818+*/
72819+int gr_acl_enable_at_secure(void)
72820+{
72821+ if (unlikely(!(gr_status & GR_READY)))
72822+ return 0;
72823+
72824+ if (current->acl->mode & GR_ATSECURE)
72825+ return 1;
72826+
72827+ return 0;
72828+}
72829+
72830+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
72831+{
72832+ struct task_struct *task = current;
72833+ struct dentry *dentry = file->f_path.dentry;
72834+ struct vfsmount *mnt = file->f_path.mnt;
72835+ struct acl_object_label *obj, *tmp;
72836+ struct acl_subject_label *subj;
72837+ unsigned int bufsize;
72838+ int is_not_root;
72839+ char *path;
72840+ dev_t dev = __get_dev(dentry);
72841+
72842+ if (unlikely(!(gr_status & GR_READY)))
72843+ return 1;
72844+
72845+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72846+ return 1;
72847+
72848+ /* ignore Eric Biederman */
72849+ if (IS_PRIVATE(dentry->d_inode))
72850+ return 1;
72851+
72852+ subj = task->acl;
72853+ read_lock(&gr_inode_lock);
72854+ do {
72855+ obj = lookup_acl_obj_label(ino, dev, subj);
72856+ if (obj != NULL) {
72857+ read_unlock(&gr_inode_lock);
72858+ return (obj->mode & GR_FIND) ? 1 : 0;
72859+ }
72860+ } while ((subj = subj->parent_subject));
72861+ read_unlock(&gr_inode_lock);
72862+
72863+ /* this is purely an optimization since we're looking for an object
72864+ for the directory we're doing a readdir on
72865+ if it's possible for any globbed object to match the entry we're
72866+ filling into the directory, then the object we find here will be
72867+ an anchor point with attached globbed objects
72868+ */
72869+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72870+ if (obj->globbed == NULL)
72871+ return (obj->mode & GR_FIND) ? 1 : 0;
72872+
72873+ is_not_root = ((obj->filename[0] == '/') &&
72874+ (obj->filename[1] == '\0')) ? 0 : 1;
72875+ bufsize = PAGE_SIZE - namelen - is_not_root;
72876+
72877+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72878+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72879+ return 1;
72880+
72881+ preempt_disable();
72882+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72883+ bufsize);
72884+
72885+ bufsize = strlen(path);
72886+
72887+ /* if base is "/", don't append an additional slash */
72888+ if (is_not_root)
72889+ *(path + bufsize) = '/';
72890+ memcpy(path + bufsize + is_not_root, name, namelen);
72891+ *(path + bufsize + namelen + is_not_root) = '\0';
72892+
72893+ tmp = obj->globbed;
72894+ while (tmp) {
72895+ if (!glob_match(tmp->filename, path)) {
72896+ preempt_enable();
72897+ return (tmp->mode & GR_FIND) ? 1 : 0;
72898+ }
72899+ tmp = tmp->next;
72900+ }
72901+ preempt_enable();
72902+ return (obj->mode & GR_FIND) ? 1 : 0;
72903+}
72904+
72905+void gr_put_exec_file(struct task_struct *task)
72906+{
72907+ struct file *filp;
72908+
72909+ write_lock(&grsec_exec_file_lock);
72910+ filp = task->exec_file;
72911+ task->exec_file = NULL;
72912+ write_unlock(&grsec_exec_file_lock);
72913+
72914+ if (filp)
72915+ fput(filp);
72916+
72917+ return;
72918+}
72919+
72920+
72921+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72922+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72923+#endif
72924+#ifdef CONFIG_SECURITY
72925+EXPORT_SYMBOL_GPL(gr_check_user_change);
72926+EXPORT_SYMBOL_GPL(gr_check_group_change);
72927+#endif
72928+
72929diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72930new file mode 100644
72931index 0000000..18ffbbd
72932--- /dev/null
72933+++ b/grsecurity/gracl_alloc.c
72934@@ -0,0 +1,105 @@
72935+#include <linux/kernel.h>
72936+#include <linux/mm.h>
72937+#include <linux/slab.h>
72938+#include <linux/vmalloc.h>
72939+#include <linux/gracl.h>
72940+#include <linux/grsecurity.h>
72941+
72942+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72943+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72944+
72945+static __inline__ int
72946+alloc_pop(void)
72947+{
72948+ if (current_alloc_state->alloc_stack_next == 1)
72949+ return 0;
72950+
72951+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72952+
72953+ current_alloc_state->alloc_stack_next--;
72954+
72955+ return 1;
72956+}
72957+
72958+static __inline__ int
72959+alloc_push(void *buf)
72960+{
72961+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72962+ return 1;
72963+
72964+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72965+
72966+ current_alloc_state->alloc_stack_next++;
72967+
72968+ return 0;
72969+}
72970+
72971+void *
72972+acl_alloc(unsigned long len)
72973+{
72974+ void *ret = NULL;
72975+
72976+ if (!len || len > PAGE_SIZE)
72977+ goto out;
72978+
72979+ ret = kmalloc(len, GFP_KERNEL);
72980+
72981+ if (ret) {
72982+ if (alloc_push(ret)) {
72983+ kfree(ret);
72984+ ret = NULL;
72985+ }
72986+ }
72987+
72988+out:
72989+ return ret;
72990+}
72991+
72992+void *
72993+acl_alloc_num(unsigned long num, unsigned long len)
72994+{
72995+ if (!len || (num > (PAGE_SIZE / len)))
72996+ return NULL;
72997+
72998+ return acl_alloc(num * len);
72999+}
73000+
73001+void
73002+acl_free_all(void)
73003+{
73004+ if (!current_alloc_state->alloc_stack)
73005+ return;
73006+
73007+ while (alloc_pop()) ;
73008+
73009+ if (current_alloc_state->alloc_stack) {
73010+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73011+ kfree(current_alloc_state->alloc_stack);
73012+ else
73013+ vfree(current_alloc_state->alloc_stack);
73014+ }
73015+
73016+ current_alloc_state->alloc_stack = NULL;
73017+ current_alloc_state->alloc_stack_size = 1;
73018+ current_alloc_state->alloc_stack_next = 1;
73019+
73020+ return;
73021+}
73022+
73023+int
73024+acl_alloc_stack_init(unsigned long size)
73025+{
73026+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73027+ current_alloc_state->alloc_stack =
73028+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73029+ else
73030+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73031+
73032+ current_alloc_state->alloc_stack_size = size;
73033+ current_alloc_state->alloc_stack_next = 1;
73034+
73035+ if (!current_alloc_state->alloc_stack)
73036+ return 0;
73037+ else
73038+ return 1;
73039+}
73040diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73041new file mode 100644
73042index 0000000..1a94c11
73043--- /dev/null
73044+++ b/grsecurity/gracl_cap.c
73045@@ -0,0 +1,127 @@
73046+#include <linux/kernel.h>
73047+#include <linux/module.h>
73048+#include <linux/sched.h>
73049+#include <linux/gracl.h>
73050+#include <linux/grsecurity.h>
73051+#include <linux/grinternal.h>
73052+
73053+extern const char *captab_log[];
73054+extern int captab_log_entries;
73055+
73056+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73057+{
73058+ struct acl_subject_label *curracl;
73059+
73060+ if (!gr_acl_is_enabled())
73061+ return 1;
73062+
73063+ curracl = task->acl;
73064+
73065+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73066+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73067+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73068+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73069+ gr_to_filename(task->exec_file->f_path.dentry,
73070+ task->exec_file->f_path.mnt) : curracl->filename,
73071+ curracl->filename, 0UL,
73072+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73073+ return 1;
73074+ }
73075+
73076+ return 0;
73077+}
73078+
73079+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73080+{
73081+ struct acl_subject_label *curracl;
73082+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73083+ kernel_cap_t cap_audit = __cap_empty_set;
73084+
73085+ if (!gr_acl_is_enabled())
73086+ return 1;
73087+
73088+ curracl = task->acl;
73089+
73090+ cap_drop = curracl->cap_lower;
73091+ cap_mask = curracl->cap_mask;
73092+ cap_audit = curracl->cap_invert_audit;
73093+
73094+ while ((curracl = curracl->parent_subject)) {
73095+ /* if the cap isn't specified in the current computed mask but is specified in the
73096+ current level subject, and is lowered in the current level subject, then add
73097+ it to the set of dropped capabilities
73098+ otherwise, add the current level subject's mask to the current computed mask
73099+ */
73100+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73101+ cap_raise(cap_mask, cap);
73102+ if (cap_raised(curracl->cap_lower, cap))
73103+ cap_raise(cap_drop, cap);
73104+ if (cap_raised(curracl->cap_invert_audit, cap))
73105+ cap_raise(cap_audit, cap);
73106+ }
73107+ }
73108+
73109+ if (!cap_raised(cap_drop, cap)) {
73110+ if (cap_raised(cap_audit, cap))
73111+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73112+ return 1;
73113+ }
73114+
73115+ /* only learn the capability use if the process has the capability in the
73116+ general case, the two uses in sys.c of gr_learn_cap are an exception
73117+ to this rule to ensure any role transition involves what the full-learned
73118+ policy believes in a privileged process
73119+ */
73120+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73121+ return 1;
73122+
73123+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73124+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73125+
73126+ return 0;
73127+}
73128+
73129+int
73130+gr_acl_is_capable(const int cap)
73131+{
73132+ return gr_task_acl_is_capable(current, current_cred(), cap);
73133+}
73134+
73135+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73136+{
73137+ struct acl_subject_label *curracl;
73138+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73139+
73140+ if (!gr_acl_is_enabled())
73141+ return 1;
73142+
73143+ curracl = task->acl;
73144+
73145+ cap_drop = curracl->cap_lower;
73146+ cap_mask = curracl->cap_mask;
73147+
73148+ while ((curracl = curracl->parent_subject)) {
73149+ /* if the cap isn't specified in the current computed mask but is specified in the
73150+ current level subject, and is lowered in the current level subject, then add
73151+ it to the set of dropped capabilities
73152+ otherwise, add the current level subject's mask to the current computed mask
73153+ */
73154+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73155+ cap_raise(cap_mask, cap);
73156+ if (cap_raised(curracl->cap_lower, cap))
73157+ cap_raise(cap_drop, cap);
73158+ }
73159+ }
73160+
73161+ if (!cap_raised(cap_drop, cap))
73162+ return 1;
73163+
73164+ return 0;
73165+}
73166+
73167+int
73168+gr_acl_is_capable_nolog(const int cap)
73169+{
73170+ return gr_task_acl_is_capable_nolog(current, cap);
73171+}
73172+
73173diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73174new file mode 100644
73175index 0000000..ca25605
73176--- /dev/null
73177+++ b/grsecurity/gracl_compat.c
73178@@ -0,0 +1,270 @@
73179+#include <linux/kernel.h>
73180+#include <linux/gracl.h>
73181+#include <linux/compat.h>
73182+#include <linux/gracl_compat.h>
73183+
73184+#include <asm/uaccess.h>
73185+
73186+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73187+{
73188+ struct gr_arg_wrapper_compat uwrapcompat;
73189+
73190+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73191+ return -EFAULT;
73192+
73193+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
73194+ (uwrapcompat.version != 0x2901)) ||
73195+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73196+ return -EINVAL;
73197+
73198+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73199+ uwrap->version = uwrapcompat.version;
73200+ uwrap->size = sizeof(struct gr_arg);
73201+
73202+ return 0;
73203+}
73204+
73205+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73206+{
73207+ struct gr_arg_compat argcompat;
73208+
73209+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73210+ return -EFAULT;
73211+
73212+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73213+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73214+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73215+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73216+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73217+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73218+
73219+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73220+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73221+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73222+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73223+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73224+ arg->segv_device = argcompat.segv_device;
73225+ arg->segv_inode = argcompat.segv_inode;
73226+ arg->segv_uid = argcompat.segv_uid;
73227+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73228+ arg->mode = argcompat.mode;
73229+
73230+ return 0;
73231+}
73232+
73233+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73234+{
73235+ struct acl_object_label_compat objcompat;
73236+
73237+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73238+ return -EFAULT;
73239+
73240+ obj->filename = compat_ptr(objcompat.filename);
73241+ obj->inode = objcompat.inode;
73242+ obj->device = objcompat.device;
73243+ obj->mode = objcompat.mode;
73244+
73245+ obj->nested = compat_ptr(objcompat.nested);
73246+ obj->globbed = compat_ptr(objcompat.globbed);
73247+
73248+ obj->prev = compat_ptr(objcompat.prev);
73249+ obj->next = compat_ptr(objcompat.next);
73250+
73251+ return 0;
73252+}
73253+
73254+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73255+{
73256+ unsigned int i;
73257+ struct acl_subject_label_compat subjcompat;
73258+
73259+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73260+ return -EFAULT;
73261+
73262+ subj->filename = compat_ptr(subjcompat.filename);
73263+ subj->inode = subjcompat.inode;
73264+ subj->device = subjcompat.device;
73265+ subj->mode = subjcompat.mode;
73266+ subj->cap_mask = subjcompat.cap_mask;
73267+ subj->cap_lower = subjcompat.cap_lower;
73268+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73269+
73270+ for (i = 0; i < GR_NLIMITS; i++) {
73271+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73272+ subj->res[i].rlim_cur = RLIM_INFINITY;
73273+ else
73274+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73275+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73276+ subj->res[i].rlim_max = RLIM_INFINITY;
73277+ else
73278+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73279+ }
73280+ subj->resmask = subjcompat.resmask;
73281+
73282+ subj->user_trans_type = subjcompat.user_trans_type;
73283+ subj->group_trans_type = subjcompat.group_trans_type;
73284+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73285+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73286+ subj->user_trans_num = subjcompat.user_trans_num;
73287+ subj->group_trans_num = subjcompat.group_trans_num;
73288+
73289+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73290+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73291+ subj->ip_type = subjcompat.ip_type;
73292+ subj->ips = compat_ptr(subjcompat.ips);
73293+ subj->ip_num = subjcompat.ip_num;
73294+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73295+
73296+ subj->crashes = subjcompat.crashes;
73297+ subj->expires = subjcompat.expires;
73298+
73299+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73300+ subj->hash = compat_ptr(subjcompat.hash);
73301+ subj->prev = compat_ptr(subjcompat.prev);
73302+ subj->next = compat_ptr(subjcompat.next);
73303+
73304+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73305+ subj->obj_hash_size = subjcompat.obj_hash_size;
73306+ subj->pax_flags = subjcompat.pax_flags;
73307+
73308+ return 0;
73309+}
73310+
73311+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73312+{
73313+ struct acl_role_label_compat rolecompat;
73314+
73315+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73316+ return -EFAULT;
73317+
73318+ role->rolename = compat_ptr(rolecompat.rolename);
73319+ role->uidgid = rolecompat.uidgid;
73320+ role->roletype = rolecompat.roletype;
73321+
73322+ role->auth_attempts = rolecompat.auth_attempts;
73323+ role->expires = rolecompat.expires;
73324+
73325+ role->root_label = compat_ptr(rolecompat.root_label);
73326+ role->hash = compat_ptr(rolecompat.hash);
73327+
73328+ role->prev = compat_ptr(rolecompat.prev);
73329+ role->next = compat_ptr(rolecompat.next);
73330+
73331+ role->transitions = compat_ptr(rolecompat.transitions);
73332+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73333+ role->domain_children = compat_ptr(rolecompat.domain_children);
73334+ role->domain_child_num = rolecompat.domain_child_num;
73335+
73336+ role->umask = rolecompat.umask;
73337+
73338+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73339+ role->subj_hash_size = rolecompat.subj_hash_size;
73340+
73341+ return 0;
73342+}
73343+
73344+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73345+{
73346+ struct role_allowed_ip_compat roleip_compat;
73347+
73348+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73349+ return -EFAULT;
73350+
73351+ roleip->addr = roleip_compat.addr;
73352+ roleip->netmask = roleip_compat.netmask;
73353+
73354+ roleip->prev = compat_ptr(roleip_compat.prev);
73355+ roleip->next = compat_ptr(roleip_compat.next);
73356+
73357+ return 0;
73358+}
73359+
73360+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73361+{
73362+ struct role_transition_compat trans_compat;
73363+
73364+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73365+ return -EFAULT;
73366+
73367+ trans->rolename = compat_ptr(trans_compat.rolename);
73368+
73369+ trans->prev = compat_ptr(trans_compat.prev);
73370+ trans->next = compat_ptr(trans_compat.next);
73371+
73372+ return 0;
73373+
73374+}
73375+
73376+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73377+{
73378+ struct gr_hash_struct_compat hash_compat;
73379+
73380+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73381+ return -EFAULT;
73382+
73383+ hash->table = compat_ptr(hash_compat.table);
73384+ hash->nametable = compat_ptr(hash_compat.nametable);
73385+ hash->first = compat_ptr(hash_compat.first);
73386+
73387+ hash->table_size = hash_compat.table_size;
73388+ hash->used_size = hash_compat.used_size;
73389+
73390+ hash->type = hash_compat.type;
73391+
73392+ return 0;
73393+}
73394+
73395+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73396+{
73397+ compat_uptr_t ptrcompat;
73398+
73399+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73400+ return -EFAULT;
73401+
73402+ *(void **)ptr = compat_ptr(ptrcompat);
73403+
73404+ return 0;
73405+}
73406+
73407+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73408+{
73409+ struct acl_ip_label_compat ip_compat;
73410+
73411+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73412+ return -EFAULT;
73413+
73414+ ip->iface = compat_ptr(ip_compat.iface);
73415+ ip->addr = ip_compat.addr;
73416+ ip->netmask = ip_compat.netmask;
73417+ ip->low = ip_compat.low;
73418+ ip->high = ip_compat.high;
73419+ ip->mode = ip_compat.mode;
73420+ ip->type = ip_compat.type;
73421+
73422+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73423+
73424+ ip->prev = compat_ptr(ip_compat.prev);
73425+ ip->next = compat_ptr(ip_compat.next);
73426+
73427+ return 0;
73428+}
73429+
73430+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73431+{
73432+ struct sprole_pw_compat pw_compat;
73433+
73434+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73435+ return -EFAULT;
73436+
73437+ pw->rolename = compat_ptr(pw_compat.rolename);
73438+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73439+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73440+
73441+ return 0;
73442+}
73443+
73444+size_t get_gr_arg_wrapper_size_compat(void)
73445+{
73446+ return sizeof(struct gr_arg_wrapper_compat);
73447+}
73448+
73449diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73450new file mode 100644
73451index 0000000..4008fdc
73452--- /dev/null
73453+++ b/grsecurity/gracl_fs.c
73454@@ -0,0 +1,445 @@
73455+#include <linux/kernel.h>
73456+#include <linux/sched.h>
73457+#include <linux/types.h>
73458+#include <linux/fs.h>
73459+#include <linux/file.h>
73460+#include <linux/stat.h>
73461+#include <linux/grsecurity.h>
73462+#include <linux/grinternal.h>
73463+#include <linux/gracl.h>
73464+
73465+umode_t
73466+gr_acl_umask(void)
73467+{
73468+ if (unlikely(!gr_acl_is_enabled()))
73469+ return 0;
73470+
73471+ return current->role->umask;
73472+}
73473+
73474+__u32
73475+gr_acl_handle_hidden_file(const struct dentry * dentry,
73476+ const struct vfsmount * mnt)
73477+{
73478+ __u32 mode;
73479+
73480+ if (unlikely(d_is_negative(dentry)))
73481+ return GR_FIND;
73482+
73483+ mode =
73484+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73485+
73486+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73487+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73488+ return mode;
73489+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73490+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73491+ return 0;
73492+ } else if (unlikely(!(mode & GR_FIND)))
73493+ return 0;
73494+
73495+ return GR_FIND;
73496+}
73497+
73498+__u32
73499+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73500+ int acc_mode)
73501+{
73502+ __u32 reqmode = GR_FIND;
73503+ __u32 mode;
73504+
73505+ if (unlikely(d_is_negative(dentry)))
73506+ return reqmode;
73507+
73508+ if (acc_mode & MAY_APPEND)
73509+ reqmode |= GR_APPEND;
73510+ else if (acc_mode & MAY_WRITE)
73511+ reqmode |= GR_WRITE;
73512+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73513+ reqmode |= GR_READ;
73514+
73515+ mode =
73516+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73517+ mnt);
73518+
73519+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73520+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73521+ reqmode & GR_READ ? " reading" : "",
73522+ reqmode & GR_WRITE ? " writing" : reqmode &
73523+ GR_APPEND ? " appending" : "");
73524+ return reqmode;
73525+ } else
73526+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73527+ {
73528+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73529+ reqmode & GR_READ ? " reading" : "",
73530+ reqmode & GR_WRITE ? " writing" : reqmode &
73531+ GR_APPEND ? " appending" : "");
73532+ return 0;
73533+ } else if (unlikely((mode & reqmode) != reqmode))
73534+ return 0;
73535+
73536+ return reqmode;
73537+}
73538+
73539+__u32
73540+gr_acl_handle_creat(const struct dentry * dentry,
73541+ const struct dentry * p_dentry,
73542+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73543+ const int imode)
73544+{
73545+ __u32 reqmode = GR_WRITE | GR_CREATE;
73546+ __u32 mode;
73547+
73548+ if (acc_mode & MAY_APPEND)
73549+ reqmode |= GR_APPEND;
73550+ // if a directory was required or the directory already exists, then
73551+ // don't count this open as a read
73552+ if ((acc_mode & MAY_READ) &&
73553+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73554+ reqmode |= GR_READ;
73555+ if ((open_flags & O_CREAT) &&
73556+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73557+ reqmode |= GR_SETID;
73558+
73559+ mode =
73560+ gr_check_create(dentry, p_dentry, p_mnt,
73561+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73562+
73563+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73564+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73565+ reqmode & GR_READ ? " reading" : "",
73566+ reqmode & GR_WRITE ? " writing" : reqmode &
73567+ GR_APPEND ? " appending" : "");
73568+ return reqmode;
73569+ } else
73570+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73571+ {
73572+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73573+ reqmode & GR_READ ? " reading" : "",
73574+ reqmode & GR_WRITE ? " writing" : reqmode &
73575+ GR_APPEND ? " appending" : "");
73576+ return 0;
73577+ } else if (unlikely((mode & reqmode) != reqmode))
73578+ return 0;
73579+
73580+ return reqmode;
73581+}
73582+
73583+__u32
73584+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73585+ const int fmode)
73586+{
73587+ __u32 mode, reqmode = GR_FIND;
73588+
73589+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73590+ reqmode |= GR_EXEC;
73591+ if (fmode & S_IWOTH)
73592+ reqmode |= GR_WRITE;
73593+ if (fmode & S_IROTH)
73594+ reqmode |= GR_READ;
73595+
73596+ mode =
73597+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73598+ mnt);
73599+
73600+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73601+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73602+ reqmode & GR_READ ? " reading" : "",
73603+ reqmode & GR_WRITE ? " writing" : "",
73604+ reqmode & GR_EXEC ? " executing" : "");
73605+ return reqmode;
73606+ } else
73607+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73608+ {
73609+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73610+ reqmode & GR_READ ? " reading" : "",
73611+ reqmode & GR_WRITE ? " writing" : "",
73612+ reqmode & GR_EXEC ? " executing" : "");
73613+ return 0;
73614+ } else if (unlikely((mode & reqmode) != reqmode))
73615+ return 0;
73616+
73617+ return reqmode;
73618+}
73619+
73620+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73621+{
73622+ __u32 mode;
73623+
73624+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73625+
73626+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73627+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73628+ return mode;
73629+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73630+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73631+ return 0;
73632+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73633+ return 0;
73634+
73635+ return (reqmode);
73636+}
73637+
73638+__u32
73639+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73640+{
73641+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
73642+}
73643+
73644+__u32
73645+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
73646+{
73647+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
73648+}
73649+
73650+__u32
73651+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
73652+{
73653+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
73654+}
73655+
73656+__u32
73657+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
73658+{
73659+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
73660+}
73661+
73662+__u32
73663+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
73664+ umode_t *modeptr)
73665+{
73666+ umode_t mode;
73667+
73668+ *modeptr &= ~gr_acl_umask();
73669+ mode = *modeptr;
73670+
73671+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
73672+ return 1;
73673+
73674+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
73675+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
73676+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
73677+ GR_CHMOD_ACL_MSG);
73678+ } else {
73679+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
73680+ }
73681+}
73682+
73683+__u32
73684+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
73685+{
73686+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
73687+}
73688+
73689+__u32
73690+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73691+{
73692+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73693+}
73694+
73695+__u32
73696+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73697+{
73698+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73699+}
73700+
73701+__u32
73702+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73703+{
73704+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73705+}
73706+
73707+__u32
73708+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73709+{
73710+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73711+ GR_UNIXCONNECT_ACL_MSG);
73712+}
73713+
73714+/* hardlinks require at minimum create and link permission,
73715+ any additional privilege required is based on the
73716+ privilege of the file being linked to
73717+*/
73718+__u32
73719+gr_acl_handle_link(const struct dentry * new_dentry,
73720+ const struct dentry * parent_dentry,
73721+ const struct vfsmount * parent_mnt,
73722+ const struct dentry * old_dentry,
73723+ const struct vfsmount * old_mnt, const struct filename *to)
73724+{
73725+ __u32 mode;
73726+ __u32 needmode = GR_CREATE | GR_LINK;
73727+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73728+
73729+ mode =
73730+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73731+ old_mnt);
73732+
73733+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73734+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73735+ return mode;
73736+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73737+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73738+ return 0;
73739+ } else if (unlikely((mode & needmode) != needmode))
73740+ return 0;
73741+
73742+ return 1;
73743+}
73744+
73745+__u32
73746+gr_acl_handle_symlink(const struct dentry * new_dentry,
73747+ const struct dentry * parent_dentry,
73748+ const struct vfsmount * parent_mnt, const struct filename *from)
73749+{
73750+ __u32 needmode = GR_WRITE | GR_CREATE;
73751+ __u32 mode;
73752+
73753+ mode =
73754+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73755+ GR_CREATE | GR_AUDIT_CREATE |
73756+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73757+
73758+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73759+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73760+ return mode;
73761+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73762+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73763+ return 0;
73764+ } else if (unlikely((mode & needmode) != needmode))
73765+ return 0;
73766+
73767+ return (GR_WRITE | GR_CREATE);
73768+}
73769+
73770+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73771+{
73772+ __u32 mode;
73773+
73774+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73775+
73776+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73777+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73778+ return mode;
73779+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73780+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73781+ return 0;
73782+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73783+ return 0;
73784+
73785+ return (reqmode);
73786+}
73787+
73788+__u32
73789+gr_acl_handle_mknod(const struct dentry * new_dentry,
73790+ const struct dentry * parent_dentry,
73791+ const struct vfsmount * parent_mnt,
73792+ const int mode)
73793+{
73794+ __u32 reqmode = GR_WRITE | GR_CREATE;
73795+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73796+ reqmode |= GR_SETID;
73797+
73798+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73799+ reqmode, GR_MKNOD_ACL_MSG);
73800+}
73801+
73802+__u32
73803+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73804+ const struct dentry *parent_dentry,
73805+ const struct vfsmount *parent_mnt)
73806+{
73807+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73808+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73809+}
73810+
73811+#define RENAME_CHECK_SUCCESS(old, new) \
73812+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73813+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73814+
73815+int
73816+gr_acl_handle_rename(struct dentry *new_dentry,
73817+ struct dentry *parent_dentry,
73818+ const struct vfsmount *parent_mnt,
73819+ struct dentry *old_dentry,
73820+ struct inode *old_parent_inode,
73821+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73822+{
73823+ __u32 comp1, comp2;
73824+ int error = 0;
73825+
73826+ if (unlikely(!gr_acl_is_enabled()))
73827+ return 0;
73828+
73829+ if (flags & RENAME_EXCHANGE) {
73830+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73831+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73832+ GR_SUPPRESS, parent_mnt);
73833+ comp2 =
73834+ gr_search_file(old_dentry,
73835+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73836+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73837+ } else if (d_is_negative(new_dentry)) {
73838+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73839+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73840+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73841+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73842+ GR_DELETE | GR_AUDIT_DELETE |
73843+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73844+ GR_SUPPRESS, old_mnt);
73845+ } else {
73846+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73847+ GR_CREATE | GR_DELETE |
73848+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73849+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73850+ GR_SUPPRESS, parent_mnt);
73851+ comp2 =
73852+ gr_search_file(old_dentry,
73853+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73854+ GR_DELETE | GR_AUDIT_DELETE |
73855+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73856+ }
73857+
73858+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73859+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73860+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73861+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73862+ && !(comp2 & GR_SUPPRESS)) {
73863+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73864+ error = -EACCES;
73865+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73866+ error = -EACCES;
73867+
73868+ return error;
73869+}
73870+
73871+void
73872+gr_acl_handle_exit(void)
73873+{
73874+ u16 id;
73875+ char *rolename;
73876+
73877+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73878+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73879+ id = current->acl_role_id;
73880+ rolename = current->role->rolename;
73881+ gr_set_acls(1);
73882+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73883+ }
73884+
73885+ gr_put_exec_file(current);
73886+ return;
73887+}
73888+
73889+int
73890+gr_acl_handle_procpidmem(const struct task_struct *task)
73891+{
73892+ if (unlikely(!gr_acl_is_enabled()))
73893+ return 0;
73894+
73895+ if (task != current && task->acl->mode & GR_PROTPROCFD)
73896+ return -EACCES;
73897+
73898+ return 0;
73899+}
73900diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73901new file mode 100644
73902index 0000000..f056b81
73903--- /dev/null
73904+++ b/grsecurity/gracl_ip.c
73905@@ -0,0 +1,386 @@
73906+#include <linux/kernel.h>
73907+#include <asm/uaccess.h>
73908+#include <asm/errno.h>
73909+#include <net/sock.h>
73910+#include <linux/file.h>
73911+#include <linux/fs.h>
73912+#include <linux/net.h>
73913+#include <linux/in.h>
73914+#include <linux/skbuff.h>
73915+#include <linux/ip.h>
73916+#include <linux/udp.h>
73917+#include <linux/types.h>
73918+#include <linux/sched.h>
73919+#include <linux/netdevice.h>
73920+#include <linux/inetdevice.h>
73921+#include <linux/gracl.h>
73922+#include <linux/grsecurity.h>
73923+#include <linux/grinternal.h>
73924+
73925+#define GR_BIND 0x01
73926+#define GR_CONNECT 0x02
73927+#define GR_INVERT 0x04
73928+#define GR_BINDOVERRIDE 0x08
73929+#define GR_CONNECTOVERRIDE 0x10
73930+#define GR_SOCK_FAMILY 0x20
73931+
73932+static const char * gr_protocols[IPPROTO_MAX] = {
73933+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73934+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73935+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73936+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73937+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73938+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73939+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73940+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73941+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73942+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73943+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73944+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73945+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73946+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73947+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73948+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73949+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
73950+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73951+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73952+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73953+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73954+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73955+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73956+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73957+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73958+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73959+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73960+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73961+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73962+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73963+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73964+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73965+ };
73966+
73967+static const char * gr_socktypes[SOCK_MAX] = {
73968+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73969+ "unknown:7", "unknown:8", "unknown:9", "packet"
73970+ };
73971+
73972+static const char * gr_sockfamilies[AF_MAX+1] = {
73973+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73974+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73975+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
73976+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
73977+ };
73978+
73979+const char *
73980+gr_proto_to_name(unsigned char proto)
73981+{
73982+ return gr_protocols[proto];
73983+}
73984+
73985+const char *
73986+gr_socktype_to_name(unsigned char type)
73987+{
73988+ return gr_socktypes[type];
73989+}
73990+
73991+const char *
73992+gr_sockfamily_to_name(unsigned char family)
73993+{
73994+ return gr_sockfamilies[family];
73995+}
73996+
73997+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73998+
73999+int
74000+gr_search_socket(const int domain, const int type, const int protocol)
74001+{
74002+ struct acl_subject_label *curr;
74003+ const struct cred *cred = current_cred();
74004+
74005+ if (unlikely(!gr_acl_is_enabled()))
74006+ goto exit;
74007+
74008+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74009+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74010+ goto exit; // let the kernel handle it
74011+
74012+ curr = current->acl;
74013+
74014+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74015+ /* the family is allowed, if this is PF_INET allow it only if
74016+ the extra sock type/protocol checks pass */
74017+ if (domain == PF_INET)
74018+ goto inet_check;
74019+ goto exit;
74020+ } else {
74021+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74022+ __u32 fakeip = 0;
74023+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74024+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74025+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74026+ gr_to_filename(current->exec_file->f_path.dentry,
74027+ current->exec_file->f_path.mnt) :
74028+ curr->filename, curr->filename,
74029+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74030+ &current->signal->saved_ip);
74031+ goto exit;
74032+ }
74033+ goto exit_fail;
74034+ }
74035+
74036+inet_check:
74037+ /* the rest of this checking is for IPv4 only */
74038+ if (!curr->ips)
74039+ goto exit;
74040+
74041+ if ((curr->ip_type & (1U << type)) &&
74042+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74043+ goto exit;
74044+
74045+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74046+ /* we don't place acls on raw sockets , and sometimes
74047+ dgram/ip sockets are opened for ioctl and not
74048+ bind/connect, so we'll fake a bind learn log */
74049+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74050+ __u32 fakeip = 0;
74051+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74052+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74053+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74054+ gr_to_filename(current->exec_file->f_path.dentry,
74055+ current->exec_file->f_path.mnt) :
74056+ curr->filename, curr->filename,
74057+ &fakeip, 0, type,
74058+ protocol, GR_CONNECT, &current->signal->saved_ip);
74059+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74060+ __u32 fakeip = 0;
74061+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74062+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74063+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74064+ gr_to_filename(current->exec_file->f_path.dentry,
74065+ current->exec_file->f_path.mnt) :
74066+ curr->filename, curr->filename,
74067+ &fakeip, 0, type,
74068+ protocol, GR_BIND, &current->signal->saved_ip);
74069+ }
74070+ /* we'll log when they use connect or bind */
74071+ goto exit;
74072+ }
74073+
74074+exit_fail:
74075+ if (domain == PF_INET)
74076+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74077+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74078+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74079+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74080+ gr_socktype_to_name(type), protocol);
74081+
74082+ return 0;
74083+exit:
74084+ return 1;
74085+}
74086+
74087+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74088+{
74089+ if ((ip->mode & mode) &&
74090+ (ip_port >= ip->low) &&
74091+ (ip_port <= ip->high) &&
74092+ ((ntohl(ip_addr) & our_netmask) ==
74093+ (ntohl(our_addr) & our_netmask))
74094+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74095+ && (ip->type & (1U << type))) {
74096+ if (ip->mode & GR_INVERT)
74097+ return 2; // specifically denied
74098+ else
74099+ return 1; // allowed
74100+ }
74101+
74102+ return 0; // not specifically allowed, may continue parsing
74103+}
74104+
74105+static int
74106+gr_search_connectbind(const int full_mode, struct sock *sk,
74107+ struct sockaddr_in *addr, const int type)
74108+{
74109+ char iface[IFNAMSIZ] = {0};
74110+ struct acl_subject_label *curr;
74111+ struct acl_ip_label *ip;
74112+ struct inet_sock *isk;
74113+ struct net_device *dev;
74114+ struct in_device *idev;
74115+ unsigned long i;
74116+ int ret;
74117+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74118+ __u32 ip_addr = 0;
74119+ __u32 our_addr;
74120+ __u32 our_netmask;
74121+ char *p;
74122+ __u16 ip_port = 0;
74123+ const struct cred *cred = current_cred();
74124+
74125+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74126+ return 0;
74127+
74128+ curr = current->acl;
74129+ isk = inet_sk(sk);
74130+
74131+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
74132+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74133+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74134+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74135+ struct sockaddr_in saddr;
74136+ int err;
74137+
74138+ saddr.sin_family = AF_INET;
74139+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74140+ saddr.sin_port = isk->inet_sport;
74141+
74142+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74143+ if (err)
74144+ return err;
74145+
74146+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74147+ if (err)
74148+ return err;
74149+ }
74150+
74151+ if (!curr->ips)
74152+ return 0;
74153+
74154+ ip_addr = addr->sin_addr.s_addr;
74155+ ip_port = ntohs(addr->sin_port);
74156+
74157+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74158+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74159+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74160+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74161+ gr_to_filename(current->exec_file->f_path.dentry,
74162+ current->exec_file->f_path.mnt) :
74163+ curr->filename, curr->filename,
74164+ &ip_addr, ip_port, type,
74165+ sk->sk_protocol, mode, &current->signal->saved_ip);
74166+ return 0;
74167+ }
74168+
74169+ for (i = 0; i < curr->ip_num; i++) {
74170+ ip = *(curr->ips + i);
74171+ if (ip->iface != NULL) {
74172+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74173+ p = strchr(iface, ':');
74174+ if (p != NULL)
74175+ *p = '\0';
74176+ dev = dev_get_by_name(sock_net(sk), iface);
74177+ if (dev == NULL)
74178+ continue;
74179+ idev = in_dev_get(dev);
74180+ if (idev == NULL) {
74181+ dev_put(dev);
74182+ continue;
74183+ }
74184+ rcu_read_lock();
74185+ for_ifa(idev) {
74186+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74187+ our_addr = ifa->ifa_address;
74188+ our_netmask = 0xffffffff;
74189+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74190+ if (ret == 1) {
74191+ rcu_read_unlock();
74192+ in_dev_put(idev);
74193+ dev_put(dev);
74194+ return 0;
74195+ } else if (ret == 2) {
74196+ rcu_read_unlock();
74197+ in_dev_put(idev);
74198+ dev_put(dev);
74199+ goto denied;
74200+ }
74201+ }
74202+ } endfor_ifa(idev);
74203+ rcu_read_unlock();
74204+ in_dev_put(idev);
74205+ dev_put(dev);
74206+ } else {
74207+ our_addr = ip->addr;
74208+ our_netmask = ip->netmask;
74209+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74210+ if (ret == 1)
74211+ return 0;
74212+ else if (ret == 2)
74213+ goto denied;
74214+ }
74215+ }
74216+
74217+denied:
74218+ if (mode == GR_BIND)
74219+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74220+ else if (mode == GR_CONNECT)
74221+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74222+
74223+ return -EACCES;
74224+}
74225+
74226+int
74227+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74228+{
74229+ /* always allow disconnection of dgram sockets with connect */
74230+ if (addr->sin_family == AF_UNSPEC)
74231+ return 0;
74232+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74233+}
74234+
74235+int
74236+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74237+{
74238+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74239+}
74240+
74241+int gr_search_listen(struct socket *sock)
74242+{
74243+ struct sock *sk = sock->sk;
74244+ struct sockaddr_in addr;
74245+
74246+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74247+ addr.sin_port = inet_sk(sk)->inet_sport;
74248+
74249+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74250+}
74251+
74252+int gr_search_accept(struct socket *sock)
74253+{
74254+ struct sock *sk = sock->sk;
74255+ struct sockaddr_in addr;
74256+
74257+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74258+ addr.sin_port = inet_sk(sk)->inet_sport;
74259+
74260+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74261+}
74262+
74263+int
74264+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74265+{
74266+ if (addr)
74267+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74268+ else {
74269+ struct sockaddr_in sin;
74270+ const struct inet_sock *inet = inet_sk(sk);
74271+
74272+ sin.sin_addr.s_addr = inet->inet_daddr;
74273+ sin.sin_port = inet->inet_dport;
74274+
74275+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74276+ }
74277+}
74278+
74279+int
74280+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74281+{
74282+ struct sockaddr_in sin;
74283+
74284+ if (unlikely(skb->len < sizeof (struct udphdr)))
74285+ return 0; // skip this packet
74286+
74287+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74288+ sin.sin_port = udp_hdr(skb)->source;
74289+
74290+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74291+}
74292diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74293new file mode 100644
74294index 0000000..25f54ef
74295--- /dev/null
74296+++ b/grsecurity/gracl_learn.c
74297@@ -0,0 +1,207 @@
74298+#include <linux/kernel.h>
74299+#include <linux/mm.h>
74300+#include <linux/sched.h>
74301+#include <linux/poll.h>
74302+#include <linux/string.h>
74303+#include <linux/file.h>
74304+#include <linux/types.h>
74305+#include <linux/vmalloc.h>
74306+#include <linux/grinternal.h>
74307+
74308+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74309+ size_t count, loff_t *ppos);
74310+extern int gr_acl_is_enabled(void);
74311+
74312+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74313+static int gr_learn_attached;
74314+
74315+/* use a 512k buffer */
74316+#define LEARN_BUFFER_SIZE (512 * 1024)
74317+
74318+static DEFINE_SPINLOCK(gr_learn_lock);
74319+static DEFINE_MUTEX(gr_learn_user_mutex);
74320+
74321+/* we need to maintain two buffers, so that the kernel context of grlearn
74322+ uses a semaphore around the userspace copying, and the other kernel contexts
74323+ use a spinlock when copying into the buffer, since they cannot sleep
74324+*/
74325+static char *learn_buffer;
74326+static char *learn_buffer_user;
74327+static int learn_buffer_len;
74328+static int learn_buffer_user_len;
74329+
74330+static ssize_t
74331+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74332+{
74333+ DECLARE_WAITQUEUE(wait, current);
74334+ ssize_t retval = 0;
74335+
74336+ add_wait_queue(&learn_wait, &wait);
74337+ set_current_state(TASK_INTERRUPTIBLE);
74338+ do {
74339+ mutex_lock(&gr_learn_user_mutex);
74340+ spin_lock(&gr_learn_lock);
74341+ if (learn_buffer_len)
74342+ break;
74343+ spin_unlock(&gr_learn_lock);
74344+ mutex_unlock(&gr_learn_user_mutex);
74345+ if (file->f_flags & O_NONBLOCK) {
74346+ retval = -EAGAIN;
74347+ goto out;
74348+ }
74349+ if (signal_pending(current)) {
74350+ retval = -ERESTARTSYS;
74351+ goto out;
74352+ }
74353+
74354+ schedule();
74355+ } while (1);
74356+
74357+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74358+ learn_buffer_user_len = learn_buffer_len;
74359+ retval = learn_buffer_len;
74360+ learn_buffer_len = 0;
74361+
74362+ spin_unlock(&gr_learn_lock);
74363+
74364+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74365+ retval = -EFAULT;
74366+
74367+ mutex_unlock(&gr_learn_user_mutex);
74368+out:
74369+ set_current_state(TASK_RUNNING);
74370+ remove_wait_queue(&learn_wait, &wait);
74371+ return retval;
74372+}
74373+
74374+static unsigned int
74375+poll_learn(struct file * file, poll_table * wait)
74376+{
74377+ poll_wait(file, &learn_wait, wait);
74378+
74379+ if (learn_buffer_len)
74380+ return (POLLIN | POLLRDNORM);
74381+
74382+ return 0;
74383+}
74384+
74385+void
74386+gr_clear_learn_entries(void)
74387+{
74388+ char *tmp;
74389+
74390+ mutex_lock(&gr_learn_user_mutex);
74391+ spin_lock(&gr_learn_lock);
74392+ tmp = learn_buffer;
74393+ learn_buffer = NULL;
74394+ spin_unlock(&gr_learn_lock);
74395+ if (tmp)
74396+ vfree(tmp);
74397+ if (learn_buffer_user != NULL) {
74398+ vfree(learn_buffer_user);
74399+ learn_buffer_user = NULL;
74400+ }
74401+ learn_buffer_len = 0;
74402+ mutex_unlock(&gr_learn_user_mutex);
74403+
74404+ return;
74405+}
74406+
74407+void
74408+gr_add_learn_entry(const char *fmt, ...)
74409+{
74410+ va_list args;
74411+ unsigned int len;
74412+
74413+ if (!gr_learn_attached)
74414+ return;
74415+
74416+ spin_lock(&gr_learn_lock);
74417+
74418+ /* leave a gap at the end so we know when it's "full" but don't have to
74419+ compute the exact length of the string we're trying to append
74420+ */
74421+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74422+ spin_unlock(&gr_learn_lock);
74423+ wake_up_interruptible(&learn_wait);
74424+ return;
74425+ }
74426+ if (learn_buffer == NULL) {
74427+ spin_unlock(&gr_learn_lock);
74428+ return;
74429+ }
74430+
74431+ va_start(args, fmt);
74432+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74433+ va_end(args);
74434+
74435+ learn_buffer_len += len + 1;
74436+
74437+ spin_unlock(&gr_learn_lock);
74438+ wake_up_interruptible(&learn_wait);
74439+
74440+ return;
74441+}
74442+
74443+static int
74444+open_learn(struct inode *inode, struct file *file)
74445+{
74446+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74447+ return -EBUSY;
74448+ if (file->f_mode & FMODE_READ) {
74449+ int retval = 0;
74450+ mutex_lock(&gr_learn_user_mutex);
74451+ if (learn_buffer == NULL)
74452+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74453+ if (learn_buffer_user == NULL)
74454+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74455+ if (learn_buffer == NULL) {
74456+ retval = -ENOMEM;
74457+ goto out_error;
74458+ }
74459+ if (learn_buffer_user == NULL) {
74460+ retval = -ENOMEM;
74461+ goto out_error;
74462+ }
74463+ learn_buffer_len = 0;
74464+ learn_buffer_user_len = 0;
74465+ gr_learn_attached = 1;
74466+out_error:
74467+ mutex_unlock(&gr_learn_user_mutex);
74468+ return retval;
74469+ }
74470+ return 0;
74471+}
74472+
74473+static int
74474+close_learn(struct inode *inode, struct file *file)
74475+{
74476+ if (file->f_mode & FMODE_READ) {
74477+ char *tmp = NULL;
74478+ mutex_lock(&gr_learn_user_mutex);
74479+ spin_lock(&gr_learn_lock);
74480+ tmp = learn_buffer;
74481+ learn_buffer = NULL;
74482+ spin_unlock(&gr_learn_lock);
74483+ if (tmp)
74484+ vfree(tmp);
74485+ if (learn_buffer_user != NULL) {
74486+ vfree(learn_buffer_user);
74487+ learn_buffer_user = NULL;
74488+ }
74489+ learn_buffer_len = 0;
74490+ learn_buffer_user_len = 0;
74491+ gr_learn_attached = 0;
74492+ mutex_unlock(&gr_learn_user_mutex);
74493+ }
74494+
74495+ return 0;
74496+}
74497+
74498+const struct file_operations grsec_fops = {
74499+ .read = read_learn,
74500+ .write = write_grsec_handler,
74501+ .open = open_learn,
74502+ .release = close_learn,
74503+ .poll = poll_learn,
74504+};
74505diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74506new file mode 100644
74507index 0000000..3f8ade0
74508--- /dev/null
74509+++ b/grsecurity/gracl_policy.c
74510@@ -0,0 +1,1782 @@
74511+#include <linux/kernel.h>
74512+#include <linux/module.h>
74513+#include <linux/sched.h>
74514+#include <linux/mm.h>
74515+#include <linux/file.h>
74516+#include <linux/fs.h>
74517+#include <linux/namei.h>
74518+#include <linux/mount.h>
74519+#include <linux/tty.h>
74520+#include <linux/proc_fs.h>
74521+#include <linux/lglock.h>
74522+#include <linux/slab.h>
74523+#include <linux/vmalloc.h>
74524+#include <linux/types.h>
74525+#include <linux/sysctl.h>
74526+#include <linux/netdevice.h>
74527+#include <linux/ptrace.h>
74528+#include <linux/gracl.h>
74529+#include <linux/gralloc.h>
74530+#include <linux/security.h>
74531+#include <linux/grinternal.h>
74532+#include <linux/pid_namespace.h>
74533+#include <linux/stop_machine.h>
74534+#include <linux/fdtable.h>
74535+#include <linux/percpu.h>
74536+#include <linux/lglock.h>
74537+#include <linux/hugetlb.h>
74538+#include <linux/posix-timers.h>
74539+#include "../fs/mount.h"
74540+
74541+#include <asm/uaccess.h>
74542+#include <asm/errno.h>
74543+#include <asm/mman.h>
74544+
74545+extern struct gr_policy_state *polstate;
74546+
74547+#define FOR_EACH_ROLE_START(role) \
74548+ role = polstate->role_list; \
74549+ while (role) {
74550+
74551+#define FOR_EACH_ROLE_END(role) \
74552+ role = role->prev; \
74553+ }
74554+
74555+struct path gr_real_root;
74556+
74557+extern struct gr_alloc_state *current_alloc_state;
74558+
74559+u16 acl_sp_role_value;
74560+
74561+static DEFINE_MUTEX(gr_dev_mutex);
74562+
74563+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74564+extern void gr_clear_learn_entries(void);
74565+
74566+struct gr_arg *gr_usermode __read_only;
74567+unsigned char *gr_system_salt __read_only;
74568+unsigned char *gr_system_sum __read_only;
74569+
74570+static unsigned int gr_auth_attempts = 0;
74571+static unsigned long gr_auth_expires = 0UL;
74572+
74573+struct acl_object_label *fakefs_obj_rw;
74574+struct acl_object_label *fakefs_obj_rwx;
74575+
74576+extern int gr_init_uidset(void);
74577+extern void gr_free_uidset(void);
74578+extern void gr_remove_uid(uid_t uid);
74579+extern int gr_find_uid(uid_t uid);
74580+
74581+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
74582+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74583+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74584+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74585+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74586+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74587+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74588+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74589+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74590+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74591+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74592+extern void assign_special_role(const char *rolename);
74593+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74594+extern int gr_rbac_disable(void *unused);
74595+extern void gr_enable_rbac_system(void);
74596+
74597+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74598+{
74599+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74600+ return -EFAULT;
74601+
74602+ return 0;
74603+}
74604+
74605+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74606+{
74607+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74608+ return -EFAULT;
74609+
74610+ return 0;
74611+}
74612+
74613+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74614+{
74615+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74616+ return -EFAULT;
74617+
74618+ return 0;
74619+}
74620+
74621+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74622+{
74623+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74624+ return -EFAULT;
74625+
74626+ return 0;
74627+}
74628+
74629+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74630+{
74631+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74632+ return -EFAULT;
74633+
74634+ return 0;
74635+}
74636+
74637+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74638+{
74639+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74640+ return -EFAULT;
74641+
74642+ return 0;
74643+}
74644+
74645+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74646+{
74647+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
74648+ return -EFAULT;
74649+
74650+ return 0;
74651+}
74652+
74653+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
74654+{
74655+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
74656+ return -EFAULT;
74657+
74658+ return 0;
74659+}
74660+
74661+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
74662+{
74663+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
74664+ return -EFAULT;
74665+
74666+ return 0;
74667+}
74668+
74669+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
74670+{
74671+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
74672+ return -EFAULT;
74673+
74674+ if (((uwrap->version != GRSECURITY_VERSION) &&
74675+ (uwrap->version != 0x2901)) ||
74676+ (uwrap->size != sizeof(struct gr_arg)))
74677+ return -EINVAL;
74678+
74679+ return 0;
74680+}
74681+
74682+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
74683+{
74684+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
74685+ return -EFAULT;
74686+
74687+ return 0;
74688+}
74689+
74690+static size_t get_gr_arg_wrapper_size_normal(void)
74691+{
74692+ return sizeof(struct gr_arg_wrapper);
74693+}
74694+
74695+#ifdef CONFIG_COMPAT
74696+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74697+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74698+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74699+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74700+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74701+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74702+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74703+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74704+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74705+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74706+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74707+extern size_t get_gr_arg_wrapper_size_compat(void);
74708+
74709+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74710+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74711+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74712+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74713+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74714+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74715+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74716+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74717+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74718+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74719+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74720+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74721+
74722+#else
74723+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74724+#define copy_gr_arg copy_gr_arg_normal
74725+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74726+#define copy_acl_object_label copy_acl_object_label_normal
74727+#define copy_acl_subject_label copy_acl_subject_label_normal
74728+#define copy_acl_role_label copy_acl_role_label_normal
74729+#define copy_acl_ip_label copy_acl_ip_label_normal
74730+#define copy_pointer_from_array copy_pointer_from_array_normal
74731+#define copy_sprole_pw copy_sprole_pw_normal
74732+#define copy_role_transition copy_role_transition_normal
74733+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74734+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74735+#endif
74736+
74737+static struct acl_subject_label *
74738+lookup_subject_map(const struct acl_subject_label *userp)
74739+{
74740+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74741+ struct subject_map *match;
74742+
74743+ match = polstate->subj_map_set.s_hash[index];
74744+
74745+ while (match && match->user != userp)
74746+ match = match->next;
74747+
74748+ if (match != NULL)
74749+ return match->kernel;
74750+ else
74751+ return NULL;
74752+}
74753+
74754+static void
74755+insert_subj_map_entry(struct subject_map *subjmap)
74756+{
74757+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74758+ struct subject_map **curr;
74759+
74760+ subjmap->prev = NULL;
74761+
74762+ curr = &polstate->subj_map_set.s_hash[index];
74763+ if (*curr != NULL)
74764+ (*curr)->prev = subjmap;
74765+
74766+ subjmap->next = *curr;
74767+ *curr = subjmap;
74768+
74769+ return;
74770+}
74771+
74772+static void
74773+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74774+{
74775+ unsigned int index =
74776+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74777+ struct acl_role_label **curr;
74778+ struct acl_role_label *tmp, *tmp2;
74779+
74780+ curr = &polstate->acl_role_set.r_hash[index];
74781+
74782+ /* simple case, slot is empty, just set it to our role */
74783+ if (*curr == NULL) {
74784+ *curr = role;
74785+ } else {
74786+ /* example:
74787+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74788+ 2 -> 3
74789+ */
74790+ /* first check to see if we can already be reached via this slot */
74791+ tmp = *curr;
74792+ while (tmp && tmp != role)
74793+ tmp = tmp->next;
74794+ if (tmp == role) {
74795+ /* we don't need to add ourselves to this slot's chain */
74796+ return;
74797+ }
74798+ /* we need to add ourselves to this chain, two cases */
74799+ if (role->next == NULL) {
74800+ /* simple case, append the current chain to our role */
74801+ role->next = *curr;
74802+ *curr = role;
74803+ } else {
74804+ /* 1 -> 2 -> 3 -> 4
74805+ 2 -> 3 -> 4
74806+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74807+ */
74808+ /* trickier case: walk our role's chain until we find
74809+ the role for the start of the current slot's chain */
74810+ tmp = role;
74811+ tmp2 = *curr;
74812+ while (tmp->next && tmp->next != tmp2)
74813+ tmp = tmp->next;
74814+ if (tmp->next == tmp2) {
74815+ /* from example above, we found 3, so just
74816+ replace this slot's chain with ours */
74817+ *curr = role;
74818+ } else {
74819+ /* we didn't find a subset of our role's chain
74820+ in the current slot's chain, so append their
74821+ chain to ours, and set us as the first role in
74822+ the slot's chain
74823+
74824+ we could fold this case with the case above,
74825+ but making it explicit for clarity
74826+ */
74827+ tmp->next = tmp2;
74828+ *curr = role;
74829+ }
74830+ }
74831+ }
74832+
74833+ return;
74834+}
74835+
74836+static void
74837+insert_acl_role_label(struct acl_role_label *role)
74838+{
74839+ int i;
74840+
74841+ if (polstate->role_list == NULL) {
74842+ polstate->role_list = role;
74843+ role->prev = NULL;
74844+ } else {
74845+ role->prev = polstate->role_list;
74846+ polstate->role_list = role;
74847+ }
74848+
74849+ /* used for hash chains */
74850+ role->next = NULL;
74851+
74852+ if (role->roletype & GR_ROLE_DOMAIN) {
74853+ for (i = 0; i < role->domain_child_num; i++)
74854+ __insert_acl_role_label(role, role->domain_children[i]);
74855+ } else
74856+ __insert_acl_role_label(role, role->uidgid);
74857+}
74858+
74859+static int
74860+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
74861+{
74862+ struct name_entry **curr, *nentry;
74863+ struct inodev_entry *ientry;
74864+ unsigned int len = strlen(name);
74865+ unsigned int key = full_name_hash(name, len);
74866+ unsigned int index = key % polstate->name_set.n_size;
74867+
74868+ curr = &polstate->name_set.n_hash[index];
74869+
74870+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74871+ curr = &((*curr)->next);
74872+
74873+ if (*curr != NULL)
74874+ return 1;
74875+
74876+ nentry = acl_alloc(sizeof (struct name_entry));
74877+ if (nentry == NULL)
74878+ return 0;
74879+ ientry = acl_alloc(sizeof (struct inodev_entry));
74880+ if (ientry == NULL)
74881+ return 0;
74882+ ientry->nentry = nentry;
74883+
74884+ nentry->key = key;
74885+ nentry->name = name;
74886+ nentry->inode = inode;
74887+ nentry->device = device;
74888+ nentry->len = len;
74889+ nentry->deleted = deleted;
74890+
74891+ nentry->prev = NULL;
74892+ curr = &polstate->name_set.n_hash[index];
74893+ if (*curr != NULL)
74894+ (*curr)->prev = nentry;
74895+ nentry->next = *curr;
74896+ *curr = nentry;
74897+
74898+ /* insert us into the table searchable by inode/dev */
74899+ __insert_inodev_entry(polstate, ientry);
74900+
74901+ return 1;
74902+}
74903+
74904+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74905+
74906+static void *
74907+create_table(__u32 * len, int elementsize)
74908+{
74909+ unsigned int table_sizes[] = {
74910+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74911+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74912+ 4194301, 8388593, 16777213, 33554393, 67108859
74913+ };
74914+ void *newtable = NULL;
74915+ unsigned int pwr = 0;
74916+
74917+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74918+ table_sizes[pwr] <= *len)
74919+ pwr++;
74920+
74921+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74922+ return newtable;
74923+
74924+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74925+ newtable =
74926+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74927+ else
74928+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74929+
74930+ *len = table_sizes[pwr];
74931+
74932+ return newtable;
74933+}
74934+
74935+static int
74936+init_variables(const struct gr_arg *arg, bool reload)
74937+{
74938+ struct task_struct *reaper = init_pid_ns.child_reaper;
74939+ unsigned int stacksize;
74940+
74941+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74942+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74943+ polstate->name_set.n_size = arg->role_db.num_objects;
74944+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74945+
74946+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74947+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74948+ return 1;
74949+
74950+ if (!reload) {
74951+ if (!gr_init_uidset())
74952+ return 1;
74953+ }
74954+
74955+ /* set up the stack that holds allocation info */
74956+
74957+ stacksize = arg->role_db.num_pointers + 5;
74958+
74959+ if (!acl_alloc_stack_init(stacksize))
74960+ return 1;
74961+
74962+ if (!reload) {
74963+ /* grab reference for the real root dentry and vfsmount */
74964+ get_fs_root(reaper->fs, &gr_real_root);
74965+
74966+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74967+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74968+#endif
74969+
74970+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74971+ if (fakefs_obj_rw == NULL)
74972+ return 1;
74973+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74974+
74975+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74976+ if (fakefs_obj_rwx == NULL)
74977+ return 1;
74978+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74979+ }
74980+
74981+ polstate->subj_map_set.s_hash =
74982+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74983+ polstate->acl_role_set.r_hash =
74984+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74985+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74986+ polstate->inodev_set.i_hash =
74987+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74988+
74989+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74990+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74991+ return 1;
74992+
74993+ memset(polstate->subj_map_set.s_hash, 0,
74994+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74995+ memset(polstate->acl_role_set.r_hash, 0,
74996+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74997+ memset(polstate->name_set.n_hash, 0,
74998+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74999+ memset(polstate->inodev_set.i_hash, 0,
75000+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75001+
75002+ return 0;
75003+}
75004+
75005+/* free information not needed after startup
75006+ currently contains user->kernel pointer mappings for subjects
75007+*/
75008+
75009+static void
75010+free_init_variables(void)
75011+{
75012+ __u32 i;
75013+
75014+ if (polstate->subj_map_set.s_hash) {
75015+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75016+ if (polstate->subj_map_set.s_hash[i]) {
75017+ kfree(polstate->subj_map_set.s_hash[i]);
75018+ polstate->subj_map_set.s_hash[i] = NULL;
75019+ }
75020+ }
75021+
75022+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75023+ PAGE_SIZE)
75024+ kfree(polstate->subj_map_set.s_hash);
75025+ else
75026+ vfree(polstate->subj_map_set.s_hash);
75027+ }
75028+
75029+ return;
75030+}
75031+
75032+static void
75033+free_variables(bool reload)
75034+{
75035+ struct acl_subject_label *s;
75036+ struct acl_role_label *r;
75037+ struct task_struct *task, *task2;
75038+ unsigned int x;
75039+
75040+ if (!reload) {
75041+ gr_clear_learn_entries();
75042+
75043+ read_lock(&tasklist_lock);
75044+ do_each_thread(task2, task) {
75045+ task->acl_sp_role = 0;
75046+ task->acl_role_id = 0;
75047+ task->inherited = 0;
75048+ task->acl = NULL;
75049+ task->role = NULL;
75050+ } while_each_thread(task2, task);
75051+ read_unlock(&tasklist_lock);
75052+
75053+ kfree(fakefs_obj_rw);
75054+ fakefs_obj_rw = NULL;
75055+ kfree(fakefs_obj_rwx);
75056+ fakefs_obj_rwx = NULL;
75057+
75058+ /* release the reference to the real root dentry and vfsmount */
75059+ path_put(&gr_real_root);
75060+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75061+ }
75062+
75063+ /* free all object hash tables */
75064+
75065+ FOR_EACH_ROLE_START(r)
75066+ if (r->subj_hash == NULL)
75067+ goto next_role;
75068+ FOR_EACH_SUBJECT_START(r, s, x)
75069+ if (s->obj_hash == NULL)
75070+ break;
75071+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75072+ kfree(s->obj_hash);
75073+ else
75074+ vfree(s->obj_hash);
75075+ FOR_EACH_SUBJECT_END(s, x)
75076+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75077+ if (s->obj_hash == NULL)
75078+ break;
75079+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75080+ kfree(s->obj_hash);
75081+ else
75082+ vfree(s->obj_hash);
75083+ FOR_EACH_NESTED_SUBJECT_END(s)
75084+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75085+ kfree(r->subj_hash);
75086+ else
75087+ vfree(r->subj_hash);
75088+ r->subj_hash = NULL;
75089+next_role:
75090+ FOR_EACH_ROLE_END(r)
75091+
75092+ acl_free_all();
75093+
75094+ if (polstate->acl_role_set.r_hash) {
75095+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75096+ PAGE_SIZE)
75097+ kfree(polstate->acl_role_set.r_hash);
75098+ else
75099+ vfree(polstate->acl_role_set.r_hash);
75100+ }
75101+ if (polstate->name_set.n_hash) {
75102+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75103+ PAGE_SIZE)
75104+ kfree(polstate->name_set.n_hash);
75105+ else
75106+ vfree(polstate->name_set.n_hash);
75107+ }
75108+
75109+ if (polstate->inodev_set.i_hash) {
75110+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75111+ PAGE_SIZE)
75112+ kfree(polstate->inodev_set.i_hash);
75113+ else
75114+ vfree(polstate->inodev_set.i_hash);
75115+ }
75116+
75117+ if (!reload)
75118+ gr_free_uidset();
75119+
75120+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75121+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75122+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75123+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75124+
75125+ polstate->default_role = NULL;
75126+ polstate->kernel_role = NULL;
75127+ polstate->role_list = NULL;
75128+
75129+ return;
75130+}
75131+
75132+static struct acl_subject_label *
75133+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75134+
75135+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75136+{
75137+ unsigned int len = strnlen_user(*name, maxlen);
75138+ char *tmp;
75139+
75140+ if (!len || len >= maxlen)
75141+ return -EINVAL;
75142+
75143+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75144+ return -ENOMEM;
75145+
75146+ if (copy_from_user(tmp, *name, len))
75147+ return -EFAULT;
75148+
75149+ tmp[len-1] = '\0';
75150+ *name = tmp;
75151+
75152+ return 0;
75153+}
75154+
75155+static int
75156+copy_user_glob(struct acl_object_label *obj)
75157+{
75158+ struct acl_object_label *g_tmp, **guser;
75159+ int error;
75160+
75161+ if (obj->globbed == NULL)
75162+ return 0;
75163+
75164+ guser = &obj->globbed;
75165+ while (*guser) {
75166+ g_tmp = (struct acl_object_label *)
75167+ acl_alloc(sizeof (struct acl_object_label));
75168+ if (g_tmp == NULL)
75169+ return -ENOMEM;
75170+
75171+ if (copy_acl_object_label(g_tmp, *guser))
75172+ return -EFAULT;
75173+
75174+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75175+ if (error)
75176+ return error;
75177+
75178+ *guser = g_tmp;
75179+ guser = &(g_tmp->next);
75180+ }
75181+
75182+ return 0;
75183+}
75184+
75185+static int
75186+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75187+ struct acl_role_label *role)
75188+{
75189+ struct acl_object_label *o_tmp;
75190+ int ret;
75191+
75192+ while (userp) {
75193+ if ((o_tmp = (struct acl_object_label *)
75194+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75195+ return -ENOMEM;
75196+
75197+ if (copy_acl_object_label(o_tmp, userp))
75198+ return -EFAULT;
75199+
75200+ userp = o_tmp->prev;
75201+
75202+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75203+ if (ret)
75204+ return ret;
75205+
75206+ insert_acl_obj_label(o_tmp, subj);
75207+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75208+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75209+ return -ENOMEM;
75210+
75211+ ret = copy_user_glob(o_tmp);
75212+ if (ret)
75213+ return ret;
75214+
75215+ if (o_tmp->nested) {
75216+ int already_copied;
75217+
75218+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75219+ if (IS_ERR(o_tmp->nested))
75220+ return PTR_ERR(o_tmp->nested);
75221+
75222+ /* insert into nested subject list if we haven't copied this one yet
75223+ to prevent duplicate entries */
75224+ if (!already_copied) {
75225+ o_tmp->nested->next = role->hash->first;
75226+ role->hash->first = o_tmp->nested;
75227+ }
75228+ }
75229+ }
75230+
75231+ return 0;
75232+}
75233+
75234+static __u32
75235+count_user_subjs(struct acl_subject_label *userp)
75236+{
75237+ struct acl_subject_label s_tmp;
75238+ __u32 num = 0;
75239+
75240+ while (userp) {
75241+ if (copy_acl_subject_label(&s_tmp, userp))
75242+ break;
75243+
75244+ userp = s_tmp.prev;
75245+ }
75246+
75247+ return num;
75248+}
75249+
75250+static int
75251+copy_user_allowedips(struct acl_role_label *rolep)
75252+{
75253+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75254+
75255+ ruserip = rolep->allowed_ips;
75256+
75257+ while (ruserip) {
75258+ rlast = rtmp;
75259+
75260+ if ((rtmp = (struct role_allowed_ip *)
75261+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75262+ return -ENOMEM;
75263+
75264+ if (copy_role_allowed_ip(rtmp, ruserip))
75265+ return -EFAULT;
75266+
75267+ ruserip = rtmp->prev;
75268+
75269+ if (!rlast) {
75270+ rtmp->prev = NULL;
75271+ rolep->allowed_ips = rtmp;
75272+ } else {
75273+ rlast->next = rtmp;
75274+ rtmp->prev = rlast;
75275+ }
75276+
75277+ if (!ruserip)
75278+ rtmp->next = NULL;
75279+ }
75280+
75281+ return 0;
75282+}
75283+
75284+static int
75285+copy_user_transitions(struct acl_role_label *rolep)
75286+{
75287+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75288+ int error;
75289+
75290+ rusertp = rolep->transitions;
75291+
75292+ while (rusertp) {
75293+ rlast = rtmp;
75294+
75295+ if ((rtmp = (struct role_transition *)
75296+ acl_alloc(sizeof (struct role_transition))) == NULL)
75297+ return -ENOMEM;
75298+
75299+ if (copy_role_transition(rtmp, rusertp))
75300+ return -EFAULT;
75301+
75302+ rusertp = rtmp->prev;
75303+
75304+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75305+ if (error)
75306+ return error;
75307+
75308+ if (!rlast) {
75309+ rtmp->prev = NULL;
75310+ rolep->transitions = rtmp;
75311+ } else {
75312+ rlast->next = rtmp;
75313+ rtmp->prev = rlast;
75314+ }
75315+
75316+ if (!rusertp)
75317+ rtmp->next = NULL;
75318+ }
75319+
75320+ return 0;
75321+}
75322+
75323+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75324+{
75325+ struct acl_object_label o_tmp;
75326+ __u32 num = 0;
75327+
75328+ while (userp) {
75329+ if (copy_acl_object_label(&o_tmp, userp))
75330+ break;
75331+
75332+ userp = o_tmp.prev;
75333+ num++;
75334+ }
75335+
75336+ return num;
75337+}
75338+
75339+static struct acl_subject_label *
75340+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75341+{
75342+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75343+ __u32 num_objs;
75344+ struct acl_ip_label **i_tmp, *i_utmp2;
75345+ struct gr_hash_struct ghash;
75346+ struct subject_map *subjmap;
75347+ unsigned int i_num;
75348+ int err;
75349+
75350+ if (already_copied != NULL)
75351+ *already_copied = 0;
75352+
75353+ s_tmp = lookup_subject_map(userp);
75354+
75355+ /* we've already copied this subject into the kernel, just return
75356+ the reference to it, and don't copy it over again
75357+ */
75358+ if (s_tmp) {
75359+ if (already_copied != NULL)
75360+ *already_copied = 1;
75361+ return(s_tmp);
75362+ }
75363+
75364+ if ((s_tmp = (struct acl_subject_label *)
75365+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75366+ return ERR_PTR(-ENOMEM);
75367+
75368+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75369+ if (subjmap == NULL)
75370+ return ERR_PTR(-ENOMEM);
75371+
75372+ subjmap->user = userp;
75373+ subjmap->kernel = s_tmp;
75374+ insert_subj_map_entry(subjmap);
75375+
75376+ if (copy_acl_subject_label(s_tmp, userp))
75377+ return ERR_PTR(-EFAULT);
75378+
75379+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75380+ if (err)
75381+ return ERR_PTR(err);
75382+
75383+ if (!strcmp(s_tmp->filename, "/"))
75384+ role->root_label = s_tmp;
75385+
75386+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
75387+ return ERR_PTR(-EFAULT);
75388+
75389+ /* copy user and group transition tables */
75390+
75391+ if (s_tmp->user_trans_num) {
75392+ uid_t *uidlist;
75393+
75394+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75395+ if (uidlist == NULL)
75396+ return ERR_PTR(-ENOMEM);
75397+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75398+ return ERR_PTR(-EFAULT);
75399+
75400+ s_tmp->user_transitions = uidlist;
75401+ }
75402+
75403+ if (s_tmp->group_trans_num) {
75404+ gid_t *gidlist;
75405+
75406+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75407+ if (gidlist == NULL)
75408+ return ERR_PTR(-ENOMEM);
75409+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75410+ return ERR_PTR(-EFAULT);
75411+
75412+ s_tmp->group_transitions = gidlist;
75413+ }
75414+
75415+ /* set up object hash table */
75416+ num_objs = count_user_objs(ghash.first);
75417+
75418+ s_tmp->obj_hash_size = num_objs;
75419+ s_tmp->obj_hash =
75420+ (struct acl_object_label **)
75421+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75422+
75423+ if (!s_tmp->obj_hash)
75424+ return ERR_PTR(-ENOMEM);
75425+
75426+ memset(s_tmp->obj_hash, 0,
75427+ s_tmp->obj_hash_size *
75428+ sizeof (struct acl_object_label *));
75429+
75430+ /* add in objects */
75431+ err = copy_user_objs(ghash.first, s_tmp, role);
75432+
75433+ if (err)
75434+ return ERR_PTR(err);
75435+
75436+ /* set pointer for parent subject */
75437+ if (s_tmp->parent_subject) {
75438+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75439+
75440+ if (IS_ERR(s_tmp2))
75441+ return s_tmp2;
75442+
75443+ s_tmp->parent_subject = s_tmp2;
75444+ }
75445+
75446+ /* add in ip acls */
75447+
75448+ if (!s_tmp->ip_num) {
75449+ s_tmp->ips = NULL;
75450+ goto insert;
75451+ }
75452+
75453+ i_tmp =
75454+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75455+ sizeof (struct acl_ip_label *));
75456+
75457+ if (!i_tmp)
75458+ return ERR_PTR(-ENOMEM);
75459+
75460+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75461+ *(i_tmp + i_num) =
75462+ (struct acl_ip_label *)
75463+ acl_alloc(sizeof (struct acl_ip_label));
75464+ if (!*(i_tmp + i_num))
75465+ return ERR_PTR(-ENOMEM);
75466+
75467+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75468+ return ERR_PTR(-EFAULT);
75469+
75470+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75471+ return ERR_PTR(-EFAULT);
75472+
75473+ if ((*(i_tmp + i_num))->iface == NULL)
75474+ continue;
75475+
75476+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75477+ if (err)
75478+ return ERR_PTR(err);
75479+ }
75480+
75481+ s_tmp->ips = i_tmp;
75482+
75483+insert:
75484+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75485+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75486+ return ERR_PTR(-ENOMEM);
75487+
75488+ return s_tmp;
75489+}
75490+
75491+static int
75492+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75493+{
75494+ struct acl_subject_label s_pre;
75495+ struct acl_subject_label * ret;
75496+ int err;
75497+
75498+ while (userp) {
75499+ if (copy_acl_subject_label(&s_pre, userp))
75500+ return -EFAULT;
75501+
75502+ ret = do_copy_user_subj(userp, role, NULL);
75503+
75504+ err = PTR_ERR(ret);
75505+ if (IS_ERR(ret))
75506+ return err;
75507+
75508+ insert_acl_subj_label(ret, role);
75509+
75510+ userp = s_pre.prev;
75511+ }
75512+
75513+ return 0;
75514+}
75515+
75516+static int
75517+copy_user_acl(struct gr_arg *arg)
75518+{
75519+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75520+ struct acl_subject_label *subj_list;
75521+ struct sprole_pw *sptmp;
75522+ struct gr_hash_struct *ghash;
75523+ uid_t *domainlist;
75524+ unsigned int r_num;
75525+ int err = 0;
75526+ __u16 i;
75527+ __u32 num_subjs;
75528+
75529+ /* we need a default and kernel role */
75530+ if (arg->role_db.num_roles < 2)
75531+ return -EINVAL;
75532+
75533+ /* copy special role authentication info from userspace */
75534+
75535+ polstate->num_sprole_pws = arg->num_sprole_pws;
75536+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75537+
75538+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75539+ return -ENOMEM;
75540+
75541+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75542+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75543+ if (!sptmp)
75544+ return -ENOMEM;
75545+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75546+ return -EFAULT;
75547+
75548+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75549+ if (err)
75550+ return err;
75551+
75552+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75553+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75554+#endif
75555+
75556+ polstate->acl_special_roles[i] = sptmp;
75557+ }
75558+
75559+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75560+
75561+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75562+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75563+
75564+ if (!r_tmp)
75565+ return -ENOMEM;
75566+
75567+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75568+ return -EFAULT;
75569+
75570+ if (copy_acl_role_label(r_tmp, r_utmp2))
75571+ return -EFAULT;
75572+
75573+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75574+ if (err)
75575+ return err;
75576+
75577+ if (!strcmp(r_tmp->rolename, "default")
75578+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75579+ polstate->default_role = r_tmp;
75580+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75581+ polstate->kernel_role = r_tmp;
75582+ }
75583+
75584+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75585+ return -ENOMEM;
75586+
75587+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75588+ return -EFAULT;
75589+
75590+ r_tmp->hash = ghash;
75591+
75592+ num_subjs = count_user_subjs(r_tmp->hash->first);
75593+
75594+ r_tmp->subj_hash_size = num_subjs;
75595+ r_tmp->subj_hash =
75596+ (struct acl_subject_label **)
75597+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75598+
75599+ if (!r_tmp->subj_hash)
75600+ return -ENOMEM;
75601+
75602+ err = copy_user_allowedips(r_tmp);
75603+ if (err)
75604+ return err;
75605+
75606+ /* copy domain info */
75607+ if (r_tmp->domain_children != NULL) {
75608+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75609+ if (domainlist == NULL)
75610+ return -ENOMEM;
75611+
75612+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75613+ return -EFAULT;
75614+
75615+ r_tmp->domain_children = domainlist;
75616+ }
75617+
75618+ err = copy_user_transitions(r_tmp);
75619+ if (err)
75620+ return err;
75621+
75622+ memset(r_tmp->subj_hash, 0,
75623+ r_tmp->subj_hash_size *
75624+ sizeof (struct acl_subject_label *));
75625+
75626+ /* acquire the list of subjects, then NULL out
75627+ the list prior to parsing the subjects for this role,
75628+ as during this parsing the list is replaced with a list
75629+ of *nested* subjects for the role
75630+ */
75631+ subj_list = r_tmp->hash->first;
75632+
75633+ /* set nested subject list to null */
75634+ r_tmp->hash->first = NULL;
75635+
75636+ err = copy_user_subjs(subj_list, r_tmp);
75637+
75638+ if (err)
75639+ return err;
75640+
75641+ insert_acl_role_label(r_tmp);
75642+ }
75643+
75644+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
75645+ return -EINVAL;
75646+
75647+ return err;
75648+}
75649+
75650+static int gracl_reload_apply_policies(void *reload)
75651+{
75652+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
75653+ struct task_struct *task, *task2;
75654+ struct acl_role_label *role, *rtmp;
75655+ struct acl_subject_label *subj;
75656+ const struct cred *cred;
75657+ int role_applied;
75658+ int ret = 0;
75659+
75660+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
75661+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
75662+
75663+ /* first make sure we'll be able to apply the new policy cleanly */
75664+ do_each_thread(task2, task) {
75665+ if (task->exec_file == NULL)
75666+ continue;
75667+ role_applied = 0;
75668+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75669+ /* preserve special roles */
75670+ FOR_EACH_ROLE_START(role)
75671+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75672+ rtmp = task->role;
75673+ task->role = role;
75674+ role_applied = 1;
75675+ break;
75676+ }
75677+ FOR_EACH_ROLE_END(role)
75678+ }
75679+ if (!role_applied) {
75680+ cred = __task_cred(task);
75681+ rtmp = task->role;
75682+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75683+ }
75684+ /* this handles non-nested inherited subjects, nested subjects will still
75685+ be dropped currently */
75686+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75687+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
75688+ /* change the role back so that we've made no modifications to the policy */
75689+ task->role = rtmp;
75690+
75691+ if (subj == NULL || task->tmpacl == NULL) {
75692+ ret = -EINVAL;
75693+ goto out;
75694+ }
75695+ } while_each_thread(task2, task);
75696+
75697+ /* now actually apply the policy */
75698+
75699+ do_each_thread(task2, task) {
75700+ if (task->exec_file) {
75701+ role_applied = 0;
75702+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75703+ /* preserve special roles */
75704+ FOR_EACH_ROLE_START(role)
75705+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75706+ task->role = role;
75707+ role_applied = 1;
75708+ break;
75709+ }
75710+ FOR_EACH_ROLE_END(role)
75711+ }
75712+ if (!role_applied) {
75713+ cred = __task_cred(task);
75714+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75715+ }
75716+ /* this handles non-nested inherited subjects, nested subjects will still
75717+ be dropped currently */
75718+ if (!reload_state->oldmode && task->inherited)
75719+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75720+ else {
75721+ /* looked up and tagged to the task previously */
75722+ subj = task->tmpacl;
75723+ }
75724+ /* subj will be non-null */
75725+ __gr_apply_subject_to_task(polstate, task, subj);
75726+ if (reload_state->oldmode) {
75727+ task->acl_role_id = 0;
75728+ task->acl_sp_role = 0;
75729+ task->inherited = 0;
75730+ }
75731+ } else {
75732+ // it's a kernel process
75733+ task->role = polstate->kernel_role;
75734+ task->acl = polstate->kernel_role->root_label;
75735+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75736+ task->acl->mode &= ~GR_PROCFIND;
75737+#endif
75738+ }
75739+ } while_each_thread(task2, task);
75740+
75741+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75742+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75743+
75744+out:
75745+
75746+ return ret;
75747+}
75748+
75749+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75750+{
75751+ struct gr_reload_state new_reload_state = { };
75752+ int err;
75753+
75754+ new_reload_state.oldpolicy_ptr = polstate;
75755+ new_reload_state.oldalloc_ptr = current_alloc_state;
75756+ new_reload_state.oldmode = oldmode;
75757+
75758+ current_alloc_state = &new_reload_state.newalloc;
75759+ polstate = &new_reload_state.newpolicy;
75760+
75761+ /* everything relevant is now saved off, copy in the new policy */
75762+ if (init_variables(args, true)) {
75763+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75764+ err = -ENOMEM;
75765+ goto error;
75766+ }
75767+
75768+ err = copy_user_acl(args);
75769+ free_init_variables();
75770+ if (err)
75771+ goto error;
75772+ /* the new policy is copied in, with the old policy available via saved_state
75773+ first go through applying roles, making sure to preserve special roles
75774+ then apply new subjects, making sure to preserve inherited and nested subjects,
75775+ though currently only inherited subjects will be preserved
75776+ */
75777+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75778+ if (err)
75779+ goto error;
75780+
75781+ /* we've now applied the new policy, so restore the old policy state to free it */
75782+ polstate = &new_reload_state.oldpolicy;
75783+ current_alloc_state = &new_reload_state.oldalloc;
75784+ free_variables(true);
75785+
75786+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75787+ to running_polstate/current_alloc_state inside stop_machine
75788+ */
75789+ err = 0;
75790+ goto out;
75791+error:
75792+ /* on error of loading the new policy, we'll just keep the previous
75793+ policy set around
75794+ */
75795+ free_variables(true);
75796+
75797+ /* doesn't affect runtime, but maintains consistent state */
75798+out:
75799+ polstate = new_reload_state.oldpolicy_ptr;
75800+ current_alloc_state = new_reload_state.oldalloc_ptr;
75801+
75802+ return err;
75803+}
75804+
75805+static int
75806+gracl_init(struct gr_arg *args)
75807+{
75808+ int error = 0;
75809+
75810+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75811+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75812+
75813+ if (init_variables(args, false)) {
75814+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75815+ error = -ENOMEM;
75816+ goto out;
75817+ }
75818+
75819+ error = copy_user_acl(args);
75820+ free_init_variables();
75821+ if (error)
75822+ goto out;
75823+
75824+ error = gr_set_acls(0);
75825+ if (error)
75826+ goto out;
75827+
75828+ gr_enable_rbac_system();
75829+
75830+ return 0;
75831+
75832+out:
75833+ free_variables(false);
75834+ return error;
75835+}
75836+
75837+static int
75838+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75839+ unsigned char **sum)
75840+{
75841+ struct acl_role_label *r;
75842+ struct role_allowed_ip *ipp;
75843+ struct role_transition *trans;
75844+ unsigned int i;
75845+ int found = 0;
75846+ u32 curr_ip = current->signal->curr_ip;
75847+
75848+ current->signal->saved_ip = curr_ip;
75849+
75850+ /* check transition table */
75851+
75852+ for (trans = current->role->transitions; trans; trans = trans->next) {
75853+ if (!strcmp(rolename, trans->rolename)) {
75854+ found = 1;
75855+ break;
75856+ }
75857+ }
75858+
75859+ if (!found)
75860+ return 0;
75861+
75862+ /* handle special roles that do not require authentication
75863+ and check ip */
75864+
75865+ FOR_EACH_ROLE_START(r)
75866+ if (!strcmp(rolename, r->rolename) &&
75867+ (r->roletype & GR_ROLE_SPECIAL)) {
75868+ found = 0;
75869+ if (r->allowed_ips != NULL) {
75870+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75871+ if ((ntohl(curr_ip) & ipp->netmask) ==
75872+ (ntohl(ipp->addr) & ipp->netmask))
75873+ found = 1;
75874+ }
75875+ } else
75876+ found = 2;
75877+ if (!found)
75878+ return 0;
75879+
75880+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75881+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75882+ *salt = NULL;
75883+ *sum = NULL;
75884+ return 1;
75885+ }
75886+ }
75887+ FOR_EACH_ROLE_END(r)
75888+
75889+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75890+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75891+ *salt = polstate->acl_special_roles[i]->salt;
75892+ *sum = polstate->acl_special_roles[i]->sum;
75893+ return 1;
75894+ }
75895+ }
75896+
75897+ return 0;
75898+}
75899+
75900+int gr_check_secure_terminal(struct task_struct *task)
75901+{
75902+ struct task_struct *p, *p2, *p3;
75903+ struct files_struct *files;
75904+ struct fdtable *fdt;
75905+ struct file *our_file = NULL, *file;
75906+ int i;
75907+
75908+ if (task->signal->tty == NULL)
75909+ return 1;
75910+
75911+ files = get_files_struct(task);
75912+ if (files != NULL) {
75913+ rcu_read_lock();
75914+ fdt = files_fdtable(files);
75915+ for (i=0; i < fdt->max_fds; i++) {
75916+ file = fcheck_files(files, i);
75917+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75918+ get_file(file);
75919+ our_file = file;
75920+ }
75921+ }
75922+ rcu_read_unlock();
75923+ put_files_struct(files);
75924+ }
75925+
75926+ if (our_file == NULL)
75927+ return 1;
75928+
75929+ read_lock(&tasklist_lock);
75930+ do_each_thread(p2, p) {
75931+ files = get_files_struct(p);
75932+ if (files == NULL ||
75933+ (p->signal && p->signal->tty == task->signal->tty)) {
75934+ if (files != NULL)
75935+ put_files_struct(files);
75936+ continue;
75937+ }
75938+ rcu_read_lock();
75939+ fdt = files_fdtable(files);
75940+ for (i=0; i < fdt->max_fds; i++) {
75941+ file = fcheck_files(files, i);
75942+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75943+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75944+ p3 = task;
75945+ while (task_pid_nr(p3) > 0) {
75946+ if (p3 == p)
75947+ break;
75948+ p3 = p3->real_parent;
75949+ }
75950+ if (p3 == p)
75951+ break;
75952+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75953+ gr_handle_alertkill(p);
75954+ rcu_read_unlock();
75955+ put_files_struct(files);
75956+ read_unlock(&tasklist_lock);
75957+ fput(our_file);
75958+ return 0;
75959+ }
75960+ }
75961+ rcu_read_unlock();
75962+ put_files_struct(files);
75963+ } while_each_thread(p2, p);
75964+ read_unlock(&tasklist_lock);
75965+
75966+ fput(our_file);
75967+ return 1;
75968+}
75969+
75970+ssize_t
75971+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75972+{
75973+ struct gr_arg_wrapper uwrap;
75974+ unsigned char *sprole_salt = NULL;
75975+ unsigned char *sprole_sum = NULL;
75976+ int error = 0;
75977+ int error2 = 0;
75978+ size_t req_count = 0;
75979+ unsigned char oldmode = 0;
75980+
75981+ mutex_lock(&gr_dev_mutex);
75982+
75983+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75984+ error = -EPERM;
75985+ goto out;
75986+ }
75987+
75988+#ifdef CONFIG_COMPAT
75989+ pax_open_kernel();
75990+ if (is_compat_task()) {
75991+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75992+ copy_gr_arg = &copy_gr_arg_compat;
75993+ copy_acl_object_label = &copy_acl_object_label_compat;
75994+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75995+ copy_acl_role_label = &copy_acl_role_label_compat;
75996+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75997+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75998+ copy_role_transition = &copy_role_transition_compat;
75999+ copy_sprole_pw = &copy_sprole_pw_compat;
76000+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76001+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76002+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76003+ } else {
76004+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76005+ copy_gr_arg = &copy_gr_arg_normal;
76006+ copy_acl_object_label = &copy_acl_object_label_normal;
76007+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76008+ copy_acl_role_label = &copy_acl_role_label_normal;
76009+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76010+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76011+ copy_role_transition = &copy_role_transition_normal;
76012+ copy_sprole_pw = &copy_sprole_pw_normal;
76013+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76014+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76015+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76016+ }
76017+ pax_close_kernel();
76018+#endif
76019+
76020+ req_count = get_gr_arg_wrapper_size();
76021+
76022+ if (count != req_count) {
76023+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76024+ error = -EINVAL;
76025+ goto out;
76026+ }
76027+
76028+
76029+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76030+ gr_auth_expires = 0;
76031+ gr_auth_attempts = 0;
76032+ }
76033+
76034+ error = copy_gr_arg_wrapper(buf, &uwrap);
76035+ if (error)
76036+ goto out;
76037+
76038+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76039+ if (error)
76040+ goto out;
76041+
76042+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76043+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76044+ time_after(gr_auth_expires, get_seconds())) {
76045+ error = -EBUSY;
76046+ goto out;
76047+ }
76048+
76049+ /* if non-root trying to do anything other than use a special role,
76050+ do not attempt authentication, do not count towards authentication
76051+ locking
76052+ */
76053+
76054+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76055+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76056+ gr_is_global_nonroot(current_uid())) {
76057+ error = -EPERM;
76058+ goto out;
76059+ }
76060+
76061+ /* ensure pw and special role name are null terminated */
76062+
76063+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76064+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76065+
76066+ /* Okay.
76067+ * We have our enough of the argument structure..(we have yet
76068+ * to copy_from_user the tables themselves) . Copy the tables
76069+ * only if we need them, i.e. for loading operations. */
76070+
76071+ switch (gr_usermode->mode) {
76072+ case GR_STATUS:
76073+ if (gr_acl_is_enabled()) {
76074+ error = 1;
76075+ if (!gr_check_secure_terminal(current))
76076+ error = 3;
76077+ } else
76078+ error = 2;
76079+ goto out;
76080+ case GR_SHUTDOWN:
76081+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76082+ stop_machine(gr_rbac_disable, NULL, NULL);
76083+ free_variables(false);
76084+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76085+ memset(gr_system_salt, 0, GR_SALT_LEN);
76086+ memset(gr_system_sum, 0, GR_SHA_LEN);
76087+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76088+ } else if (gr_acl_is_enabled()) {
76089+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76090+ error = -EPERM;
76091+ } else {
76092+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76093+ error = -EAGAIN;
76094+ }
76095+ break;
76096+ case GR_ENABLE:
76097+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76098+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76099+ else {
76100+ if (gr_acl_is_enabled())
76101+ error = -EAGAIN;
76102+ else
76103+ error = error2;
76104+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76105+ }
76106+ break;
76107+ case GR_OLDRELOAD:
76108+ oldmode = 1;
76109+ case GR_RELOAD:
76110+ if (!gr_acl_is_enabled()) {
76111+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76112+ error = -EAGAIN;
76113+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76114+ error2 = gracl_reload(gr_usermode, oldmode);
76115+ if (!error2)
76116+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76117+ else {
76118+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76119+ error = error2;
76120+ }
76121+ } else {
76122+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76123+ error = -EPERM;
76124+ }
76125+ break;
76126+ case GR_SEGVMOD:
76127+ if (unlikely(!gr_acl_is_enabled())) {
76128+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76129+ error = -EAGAIN;
76130+ break;
76131+ }
76132+
76133+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76134+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76135+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76136+ struct acl_subject_label *segvacl;
76137+ segvacl =
76138+ lookup_acl_subj_label(gr_usermode->segv_inode,
76139+ gr_usermode->segv_device,
76140+ current->role);
76141+ if (segvacl) {
76142+ segvacl->crashes = 0;
76143+ segvacl->expires = 0;
76144+ }
76145+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76146+ gr_remove_uid(gr_usermode->segv_uid);
76147+ }
76148+ } else {
76149+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76150+ error = -EPERM;
76151+ }
76152+ break;
76153+ case GR_SPROLE:
76154+ case GR_SPROLEPAM:
76155+ if (unlikely(!gr_acl_is_enabled())) {
76156+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76157+ error = -EAGAIN;
76158+ break;
76159+ }
76160+
76161+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76162+ current->role->expires = 0;
76163+ current->role->auth_attempts = 0;
76164+ }
76165+
76166+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76167+ time_after(current->role->expires, get_seconds())) {
76168+ error = -EBUSY;
76169+ goto out;
76170+ }
76171+
76172+ if (lookup_special_role_auth
76173+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76174+ && ((!sprole_salt && !sprole_sum)
76175+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76176+ char *p = "";
76177+ assign_special_role(gr_usermode->sp_role);
76178+ read_lock(&tasklist_lock);
76179+ if (current->real_parent)
76180+ p = current->real_parent->role->rolename;
76181+ read_unlock(&tasklist_lock);
76182+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76183+ p, acl_sp_role_value);
76184+ } else {
76185+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76186+ error = -EPERM;
76187+ if(!(current->role->auth_attempts++))
76188+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76189+
76190+ goto out;
76191+ }
76192+ break;
76193+ case GR_UNSPROLE:
76194+ if (unlikely(!gr_acl_is_enabled())) {
76195+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76196+ error = -EAGAIN;
76197+ break;
76198+ }
76199+
76200+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76201+ char *p = "";
76202+ int i = 0;
76203+
76204+ read_lock(&tasklist_lock);
76205+ if (current->real_parent) {
76206+ p = current->real_parent->role->rolename;
76207+ i = current->real_parent->acl_role_id;
76208+ }
76209+ read_unlock(&tasklist_lock);
76210+
76211+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76212+ gr_set_acls(1);
76213+ } else {
76214+ error = -EPERM;
76215+ goto out;
76216+ }
76217+ break;
76218+ default:
76219+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76220+ error = -EINVAL;
76221+ break;
76222+ }
76223+
76224+ if (error != -EPERM)
76225+ goto out;
76226+
76227+ if(!(gr_auth_attempts++))
76228+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76229+
76230+ out:
76231+ mutex_unlock(&gr_dev_mutex);
76232+
76233+ if (!error)
76234+ error = req_count;
76235+
76236+ return error;
76237+}
76238+
76239+int
76240+gr_set_acls(const int type)
76241+{
76242+ struct task_struct *task, *task2;
76243+ struct acl_role_label *role = current->role;
76244+ struct acl_subject_label *subj;
76245+ __u16 acl_role_id = current->acl_role_id;
76246+ const struct cred *cred;
76247+ int ret;
76248+
76249+ rcu_read_lock();
76250+ read_lock(&tasklist_lock);
76251+ read_lock(&grsec_exec_file_lock);
76252+ do_each_thread(task2, task) {
76253+ /* check to see if we're called from the exit handler,
76254+ if so, only replace ACLs that have inherited the admin
76255+ ACL */
76256+
76257+ if (type && (task->role != role ||
76258+ task->acl_role_id != acl_role_id))
76259+ continue;
76260+
76261+ task->acl_role_id = 0;
76262+ task->acl_sp_role = 0;
76263+ task->inherited = 0;
76264+
76265+ if (task->exec_file) {
76266+ cred = __task_cred(task);
76267+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76268+ subj = __gr_get_subject_for_task(polstate, task, NULL);
76269+ if (subj == NULL) {
76270+ ret = -EINVAL;
76271+ read_unlock(&grsec_exec_file_lock);
76272+ read_unlock(&tasklist_lock);
76273+ rcu_read_unlock();
76274+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76275+ return ret;
76276+ }
76277+ __gr_apply_subject_to_task(polstate, task, subj);
76278+ } else {
76279+ // it's a kernel process
76280+ task->role = polstate->kernel_role;
76281+ task->acl = polstate->kernel_role->root_label;
76282+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76283+ task->acl->mode &= ~GR_PROCFIND;
76284+#endif
76285+ }
76286+ } while_each_thread(task2, task);
76287+ read_unlock(&grsec_exec_file_lock);
76288+ read_unlock(&tasklist_lock);
76289+ rcu_read_unlock();
76290+
76291+ return 0;
76292+}
76293diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76294new file mode 100644
76295index 0000000..39645c9
76296--- /dev/null
76297+++ b/grsecurity/gracl_res.c
76298@@ -0,0 +1,68 @@
76299+#include <linux/kernel.h>
76300+#include <linux/sched.h>
76301+#include <linux/gracl.h>
76302+#include <linux/grinternal.h>
76303+
76304+static const char *restab_log[] = {
76305+ [RLIMIT_CPU] = "RLIMIT_CPU",
76306+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76307+ [RLIMIT_DATA] = "RLIMIT_DATA",
76308+ [RLIMIT_STACK] = "RLIMIT_STACK",
76309+ [RLIMIT_CORE] = "RLIMIT_CORE",
76310+ [RLIMIT_RSS] = "RLIMIT_RSS",
76311+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76312+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76313+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76314+ [RLIMIT_AS] = "RLIMIT_AS",
76315+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76316+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76317+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76318+ [RLIMIT_NICE] = "RLIMIT_NICE",
76319+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76320+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76321+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76322+};
76323+
76324+void
76325+gr_log_resource(const struct task_struct *task,
76326+ const int res, const unsigned long wanted, const int gt)
76327+{
76328+ const struct cred *cred;
76329+ unsigned long rlim;
76330+
76331+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76332+ return;
76333+
76334+ // not yet supported resource
76335+ if (unlikely(!restab_log[res]))
76336+ return;
76337+
76338+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76339+ rlim = task_rlimit_max(task, res);
76340+ else
76341+ rlim = task_rlimit(task, res);
76342+
76343+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76344+ return;
76345+
76346+ rcu_read_lock();
76347+ cred = __task_cred(task);
76348+
76349+ if (res == RLIMIT_NPROC &&
76350+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76351+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76352+ goto out_rcu_unlock;
76353+ else if (res == RLIMIT_MEMLOCK &&
76354+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76355+ goto out_rcu_unlock;
76356+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76357+ goto out_rcu_unlock;
76358+ rcu_read_unlock();
76359+
76360+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76361+
76362+ return;
76363+out_rcu_unlock:
76364+ rcu_read_unlock();
76365+ return;
76366+}
76367diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76368new file mode 100644
76369index 0000000..2040e61
76370--- /dev/null
76371+++ b/grsecurity/gracl_segv.c
76372@@ -0,0 +1,313 @@
76373+#include <linux/kernel.h>
76374+#include <linux/mm.h>
76375+#include <asm/uaccess.h>
76376+#include <asm/errno.h>
76377+#include <asm/mman.h>
76378+#include <net/sock.h>
76379+#include <linux/file.h>
76380+#include <linux/fs.h>
76381+#include <linux/net.h>
76382+#include <linux/in.h>
76383+#include <linux/slab.h>
76384+#include <linux/types.h>
76385+#include <linux/sched.h>
76386+#include <linux/timer.h>
76387+#include <linux/gracl.h>
76388+#include <linux/grsecurity.h>
76389+#include <linux/grinternal.h>
76390+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76391+#include <linux/magic.h>
76392+#include <linux/pagemap.h>
76393+#include "../fs/btrfs/async-thread.h"
76394+#include "../fs/btrfs/ctree.h"
76395+#include "../fs/btrfs/btrfs_inode.h"
76396+#endif
76397+
76398+static struct crash_uid *uid_set;
76399+static unsigned short uid_used;
76400+static DEFINE_SPINLOCK(gr_uid_lock);
76401+extern rwlock_t gr_inode_lock;
76402+extern struct acl_subject_label *
76403+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
76404+ struct acl_role_label *role);
76405+
76406+static inline dev_t __get_dev(const struct dentry *dentry)
76407+{
76408+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76409+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76410+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76411+ else
76412+#endif
76413+ return dentry->d_sb->s_dev;
76414+}
76415+
76416+int
76417+gr_init_uidset(void)
76418+{
76419+ uid_set =
76420+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76421+ uid_used = 0;
76422+
76423+ return uid_set ? 1 : 0;
76424+}
76425+
76426+void
76427+gr_free_uidset(void)
76428+{
76429+ if (uid_set) {
76430+ struct crash_uid *tmpset;
76431+ spin_lock(&gr_uid_lock);
76432+ tmpset = uid_set;
76433+ uid_set = NULL;
76434+ uid_used = 0;
76435+ spin_unlock(&gr_uid_lock);
76436+ if (tmpset)
76437+ kfree(tmpset);
76438+ }
76439+
76440+ return;
76441+}
76442+
76443+int
76444+gr_find_uid(const uid_t uid)
76445+{
76446+ struct crash_uid *tmp = uid_set;
76447+ uid_t buid;
76448+ int low = 0, high = uid_used - 1, mid;
76449+
76450+ while (high >= low) {
76451+ mid = (low + high) >> 1;
76452+ buid = tmp[mid].uid;
76453+ if (buid == uid)
76454+ return mid;
76455+ if (buid > uid)
76456+ high = mid - 1;
76457+ if (buid < uid)
76458+ low = mid + 1;
76459+ }
76460+
76461+ return -1;
76462+}
76463+
76464+static __inline__ void
76465+gr_insertsort(void)
76466+{
76467+ unsigned short i, j;
76468+ struct crash_uid index;
76469+
76470+ for (i = 1; i < uid_used; i++) {
76471+ index = uid_set[i];
76472+ j = i;
76473+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76474+ uid_set[j] = uid_set[j - 1];
76475+ j--;
76476+ }
76477+ uid_set[j] = index;
76478+ }
76479+
76480+ return;
76481+}
76482+
76483+static __inline__ void
76484+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76485+{
76486+ int loc;
76487+ uid_t uid = GR_GLOBAL_UID(kuid);
76488+
76489+ if (uid_used == GR_UIDTABLE_MAX)
76490+ return;
76491+
76492+ loc = gr_find_uid(uid);
76493+
76494+ if (loc >= 0) {
76495+ uid_set[loc].expires = expires;
76496+ return;
76497+ }
76498+
76499+ uid_set[uid_used].uid = uid;
76500+ uid_set[uid_used].expires = expires;
76501+ uid_used++;
76502+
76503+ gr_insertsort();
76504+
76505+ return;
76506+}
76507+
76508+void
76509+gr_remove_uid(const unsigned short loc)
76510+{
76511+ unsigned short i;
76512+
76513+ for (i = loc + 1; i < uid_used; i++)
76514+ uid_set[i - 1] = uid_set[i];
76515+
76516+ uid_used--;
76517+
76518+ return;
76519+}
76520+
76521+int
76522+gr_check_crash_uid(const kuid_t kuid)
76523+{
76524+ int loc;
76525+ int ret = 0;
76526+ uid_t uid;
76527+
76528+ if (unlikely(!gr_acl_is_enabled()))
76529+ return 0;
76530+
76531+ uid = GR_GLOBAL_UID(kuid);
76532+
76533+ spin_lock(&gr_uid_lock);
76534+ loc = gr_find_uid(uid);
76535+
76536+ if (loc < 0)
76537+ goto out_unlock;
76538+
76539+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76540+ gr_remove_uid(loc);
76541+ else
76542+ ret = 1;
76543+
76544+out_unlock:
76545+ spin_unlock(&gr_uid_lock);
76546+ return ret;
76547+}
76548+
76549+static __inline__ int
76550+proc_is_setxid(const struct cred *cred)
76551+{
76552+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76553+ !uid_eq(cred->uid, cred->fsuid))
76554+ return 1;
76555+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76556+ !gid_eq(cred->gid, cred->fsgid))
76557+ return 1;
76558+
76559+ return 0;
76560+}
76561+
76562+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76563+
76564+void
76565+gr_handle_crash(struct task_struct *task, const int sig)
76566+{
76567+ struct acl_subject_label *curr;
76568+ struct task_struct *tsk, *tsk2;
76569+ const struct cred *cred;
76570+ const struct cred *cred2;
76571+
76572+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76573+ return;
76574+
76575+ if (unlikely(!gr_acl_is_enabled()))
76576+ return;
76577+
76578+ curr = task->acl;
76579+
76580+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76581+ return;
76582+
76583+ if (time_before_eq(curr->expires, get_seconds())) {
76584+ curr->expires = 0;
76585+ curr->crashes = 0;
76586+ }
76587+
76588+ curr->crashes++;
76589+
76590+ if (!curr->expires)
76591+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76592+
76593+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76594+ time_after(curr->expires, get_seconds())) {
76595+ rcu_read_lock();
76596+ cred = __task_cred(task);
76597+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76598+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76599+ spin_lock(&gr_uid_lock);
76600+ gr_insert_uid(cred->uid, curr->expires);
76601+ spin_unlock(&gr_uid_lock);
76602+ curr->expires = 0;
76603+ curr->crashes = 0;
76604+ read_lock(&tasklist_lock);
76605+ do_each_thread(tsk2, tsk) {
76606+ cred2 = __task_cred(tsk);
76607+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76608+ gr_fake_force_sig(SIGKILL, tsk);
76609+ } while_each_thread(tsk2, tsk);
76610+ read_unlock(&tasklist_lock);
76611+ } else {
76612+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76613+ read_lock(&tasklist_lock);
76614+ read_lock(&grsec_exec_file_lock);
76615+ do_each_thread(tsk2, tsk) {
76616+ if (likely(tsk != task)) {
76617+ // if this thread has the same subject as the one that triggered
76618+ // RES_CRASH and it's the same binary, kill it
76619+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76620+ gr_fake_force_sig(SIGKILL, tsk);
76621+ }
76622+ } while_each_thread(tsk2, tsk);
76623+ read_unlock(&grsec_exec_file_lock);
76624+ read_unlock(&tasklist_lock);
76625+ }
76626+ rcu_read_unlock();
76627+ }
76628+
76629+ return;
76630+}
76631+
76632+int
76633+gr_check_crash_exec(const struct file *filp)
76634+{
76635+ struct acl_subject_label *curr;
76636+
76637+ if (unlikely(!gr_acl_is_enabled()))
76638+ return 0;
76639+
76640+ read_lock(&gr_inode_lock);
76641+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
76642+ __get_dev(filp->f_path.dentry),
76643+ current->role);
76644+ read_unlock(&gr_inode_lock);
76645+
76646+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
76647+ (!curr->crashes && !curr->expires))
76648+ return 0;
76649+
76650+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76651+ time_after(curr->expires, get_seconds()))
76652+ return 1;
76653+ else if (time_before_eq(curr->expires, get_seconds())) {
76654+ curr->crashes = 0;
76655+ curr->expires = 0;
76656+ }
76657+
76658+ return 0;
76659+}
76660+
76661+void
76662+gr_handle_alertkill(struct task_struct *task)
76663+{
76664+ struct acl_subject_label *curracl;
76665+ __u32 curr_ip;
76666+ struct task_struct *p, *p2;
76667+
76668+ if (unlikely(!gr_acl_is_enabled()))
76669+ return;
76670+
76671+ curracl = task->acl;
76672+ curr_ip = task->signal->curr_ip;
76673+
76674+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
76675+ read_lock(&tasklist_lock);
76676+ do_each_thread(p2, p) {
76677+ if (p->signal->curr_ip == curr_ip)
76678+ gr_fake_force_sig(SIGKILL, p);
76679+ } while_each_thread(p2, p);
76680+ read_unlock(&tasklist_lock);
76681+ } else if (curracl->mode & GR_KILLPROC)
76682+ gr_fake_force_sig(SIGKILL, task);
76683+
76684+ return;
76685+}
76686diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
76687new file mode 100644
76688index 0000000..6b0c9cc
76689--- /dev/null
76690+++ b/grsecurity/gracl_shm.c
76691@@ -0,0 +1,40 @@
76692+#include <linux/kernel.h>
76693+#include <linux/mm.h>
76694+#include <linux/sched.h>
76695+#include <linux/file.h>
76696+#include <linux/ipc.h>
76697+#include <linux/gracl.h>
76698+#include <linux/grsecurity.h>
76699+#include <linux/grinternal.h>
76700+
76701+int
76702+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76703+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76704+{
76705+ struct task_struct *task;
76706+
76707+ if (!gr_acl_is_enabled())
76708+ return 1;
76709+
76710+ rcu_read_lock();
76711+ read_lock(&tasklist_lock);
76712+
76713+ task = find_task_by_vpid(shm_cprid);
76714+
76715+ if (unlikely(!task))
76716+ task = find_task_by_vpid(shm_lapid);
76717+
76718+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76719+ (task_pid_nr(task) == shm_lapid)) &&
76720+ (task->acl->mode & GR_PROTSHM) &&
76721+ (task->acl != current->acl))) {
76722+ read_unlock(&tasklist_lock);
76723+ rcu_read_unlock();
76724+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76725+ return 0;
76726+ }
76727+ read_unlock(&tasklist_lock);
76728+ rcu_read_unlock();
76729+
76730+ return 1;
76731+}
76732diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76733new file mode 100644
76734index 0000000..bc0be01
76735--- /dev/null
76736+++ b/grsecurity/grsec_chdir.c
76737@@ -0,0 +1,19 @@
76738+#include <linux/kernel.h>
76739+#include <linux/sched.h>
76740+#include <linux/fs.h>
76741+#include <linux/file.h>
76742+#include <linux/grsecurity.h>
76743+#include <linux/grinternal.h>
76744+
76745+void
76746+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76747+{
76748+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76749+ if ((grsec_enable_chdir && grsec_enable_group &&
76750+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76751+ !grsec_enable_group)) {
76752+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76753+ }
76754+#endif
76755+ return;
76756+}
76757diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76758new file mode 100644
76759index 0000000..6d99cec
76760--- /dev/null
76761+++ b/grsecurity/grsec_chroot.c
76762@@ -0,0 +1,385 @@
76763+#include <linux/kernel.h>
76764+#include <linux/module.h>
76765+#include <linux/sched.h>
76766+#include <linux/file.h>
76767+#include <linux/fs.h>
76768+#include <linux/mount.h>
76769+#include <linux/types.h>
76770+#include "../fs/mount.h"
76771+#include <linux/grsecurity.h>
76772+#include <linux/grinternal.h>
76773+
76774+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76775+int gr_init_ran;
76776+#endif
76777+
76778+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76779+{
76780+#ifdef CONFIG_GRKERNSEC
76781+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76782+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76783+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76784+ && gr_init_ran
76785+#endif
76786+ )
76787+ task->gr_is_chrooted = 1;
76788+ else {
76789+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76790+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76791+ gr_init_ran = 1;
76792+#endif
76793+ task->gr_is_chrooted = 0;
76794+ }
76795+
76796+ task->gr_chroot_dentry = path->dentry;
76797+#endif
76798+ return;
76799+}
76800+
76801+void gr_clear_chroot_entries(struct task_struct *task)
76802+{
76803+#ifdef CONFIG_GRKERNSEC
76804+ task->gr_is_chrooted = 0;
76805+ task->gr_chroot_dentry = NULL;
76806+#endif
76807+ return;
76808+}
76809+
76810+int
76811+gr_handle_chroot_unix(const pid_t pid)
76812+{
76813+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76814+ struct task_struct *p;
76815+
76816+ if (unlikely(!grsec_enable_chroot_unix))
76817+ return 1;
76818+
76819+ if (likely(!proc_is_chrooted(current)))
76820+ return 1;
76821+
76822+ rcu_read_lock();
76823+ read_lock(&tasklist_lock);
76824+ p = find_task_by_vpid_unrestricted(pid);
76825+ if (unlikely(p && !have_same_root(current, p))) {
76826+ read_unlock(&tasklist_lock);
76827+ rcu_read_unlock();
76828+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76829+ return 0;
76830+ }
76831+ read_unlock(&tasklist_lock);
76832+ rcu_read_unlock();
76833+#endif
76834+ return 1;
76835+}
76836+
76837+int
76838+gr_handle_chroot_nice(void)
76839+{
76840+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76841+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76842+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76843+ return -EPERM;
76844+ }
76845+#endif
76846+ return 0;
76847+}
76848+
76849+int
76850+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76851+{
76852+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76853+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76854+ && proc_is_chrooted(current)) {
76855+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76856+ return -EACCES;
76857+ }
76858+#endif
76859+ return 0;
76860+}
76861+
76862+int
76863+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76864+{
76865+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76866+ struct task_struct *p;
76867+ int ret = 0;
76868+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76869+ return ret;
76870+
76871+ read_lock(&tasklist_lock);
76872+ do_each_pid_task(pid, type, p) {
76873+ if (!have_same_root(current, p)) {
76874+ ret = 1;
76875+ goto out;
76876+ }
76877+ } while_each_pid_task(pid, type, p);
76878+out:
76879+ read_unlock(&tasklist_lock);
76880+ return ret;
76881+#endif
76882+ return 0;
76883+}
76884+
76885+int
76886+gr_pid_is_chrooted(struct task_struct *p)
76887+{
76888+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76889+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76890+ return 0;
76891+
76892+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76893+ !have_same_root(current, p)) {
76894+ return 1;
76895+ }
76896+#endif
76897+ return 0;
76898+}
76899+
76900+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76901+
76902+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76903+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76904+{
76905+ struct path path, currentroot;
76906+ int ret = 0;
76907+
76908+ path.dentry = (struct dentry *)u_dentry;
76909+ path.mnt = (struct vfsmount *)u_mnt;
76910+ get_fs_root(current->fs, &currentroot);
76911+ if (path_is_under(&path, &currentroot))
76912+ ret = 1;
76913+ path_put(&currentroot);
76914+
76915+ return ret;
76916+}
76917+#endif
76918+
76919+int
76920+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76921+{
76922+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76923+ if (!grsec_enable_chroot_fchdir)
76924+ return 1;
76925+
76926+ if (!proc_is_chrooted(current))
76927+ return 1;
76928+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76929+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76930+ return 0;
76931+ }
76932+#endif
76933+ return 1;
76934+}
76935+
76936+int
76937+gr_chroot_fhandle(void)
76938+{
76939+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76940+ if (!grsec_enable_chroot_fchdir)
76941+ return 1;
76942+
76943+ if (!proc_is_chrooted(current))
76944+ return 1;
76945+ else {
76946+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76947+ return 0;
76948+ }
76949+#endif
76950+ return 1;
76951+}
76952+
76953+int
76954+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76955+ const u64 shm_createtime)
76956+{
76957+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76958+ struct task_struct *p;
76959+
76960+ if (unlikely(!grsec_enable_chroot_shmat))
76961+ return 1;
76962+
76963+ if (likely(!proc_is_chrooted(current)))
76964+ return 1;
76965+
76966+ rcu_read_lock();
76967+ read_lock(&tasklist_lock);
76968+
76969+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76970+ if (time_before_eq64(p->start_time, shm_createtime)) {
76971+ if (have_same_root(current, p)) {
76972+ goto allow;
76973+ } else {
76974+ read_unlock(&tasklist_lock);
76975+ rcu_read_unlock();
76976+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76977+ return 0;
76978+ }
76979+ }
76980+ /* creator exited, pid reuse, fall through to next check */
76981+ }
76982+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76983+ if (unlikely(!have_same_root(current, p))) {
76984+ read_unlock(&tasklist_lock);
76985+ rcu_read_unlock();
76986+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76987+ return 0;
76988+ }
76989+ }
76990+
76991+allow:
76992+ read_unlock(&tasklist_lock);
76993+ rcu_read_unlock();
76994+#endif
76995+ return 1;
76996+}
76997+
76998+void
76999+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77000+{
77001+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77002+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77003+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77004+#endif
77005+ return;
77006+}
77007+
77008+int
77009+gr_handle_chroot_mknod(const struct dentry *dentry,
77010+ const struct vfsmount *mnt, const int mode)
77011+{
77012+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77013+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77014+ proc_is_chrooted(current)) {
77015+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77016+ return -EPERM;
77017+ }
77018+#endif
77019+ return 0;
77020+}
77021+
77022+int
77023+gr_handle_chroot_mount(const struct dentry *dentry,
77024+ const struct vfsmount *mnt, const char *dev_name)
77025+{
77026+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77027+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77028+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77029+ return -EPERM;
77030+ }
77031+#endif
77032+ return 0;
77033+}
77034+
77035+int
77036+gr_handle_chroot_pivot(void)
77037+{
77038+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77039+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77040+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77041+ return -EPERM;
77042+ }
77043+#endif
77044+ return 0;
77045+}
77046+
77047+int
77048+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77049+{
77050+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77051+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77052+ !gr_is_outside_chroot(dentry, mnt)) {
77053+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77054+ return -EPERM;
77055+ }
77056+#endif
77057+ return 0;
77058+}
77059+
77060+extern const char *captab_log[];
77061+extern int captab_log_entries;
77062+
77063+int
77064+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77065+{
77066+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77067+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77068+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77069+ if (cap_raised(chroot_caps, cap)) {
77070+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77071+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77072+ }
77073+ return 0;
77074+ }
77075+ }
77076+#endif
77077+ return 1;
77078+}
77079+
77080+int
77081+gr_chroot_is_capable(const int cap)
77082+{
77083+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77084+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77085+#endif
77086+ return 1;
77087+}
77088+
77089+int
77090+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77091+{
77092+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77093+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77094+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77095+ if (cap_raised(chroot_caps, cap)) {
77096+ return 0;
77097+ }
77098+ }
77099+#endif
77100+ return 1;
77101+}
77102+
77103+int
77104+gr_chroot_is_capable_nolog(const int cap)
77105+{
77106+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77107+ return gr_task_chroot_is_capable_nolog(current, cap);
77108+#endif
77109+ return 1;
77110+}
77111+
77112+int
77113+gr_handle_chroot_sysctl(const int op)
77114+{
77115+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77116+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77117+ proc_is_chrooted(current))
77118+ return -EACCES;
77119+#endif
77120+ return 0;
77121+}
77122+
77123+void
77124+gr_handle_chroot_chdir(const struct path *path)
77125+{
77126+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77127+ if (grsec_enable_chroot_chdir)
77128+ set_fs_pwd(current->fs, path);
77129+#endif
77130+ return;
77131+}
77132+
77133+int
77134+gr_handle_chroot_chmod(const struct dentry *dentry,
77135+ const struct vfsmount *mnt, const int mode)
77136+{
77137+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77138+ /* allow chmod +s on directories, but not files */
77139+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77140+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77141+ proc_is_chrooted(current)) {
77142+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77143+ return -EPERM;
77144+ }
77145+#endif
77146+ return 0;
77147+}
77148diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77149new file mode 100644
77150index 0000000..0f9ac91
77151--- /dev/null
77152+++ b/grsecurity/grsec_disabled.c
77153@@ -0,0 +1,440 @@
77154+#include <linux/kernel.h>
77155+#include <linux/module.h>
77156+#include <linux/sched.h>
77157+#include <linux/file.h>
77158+#include <linux/fs.h>
77159+#include <linux/kdev_t.h>
77160+#include <linux/net.h>
77161+#include <linux/in.h>
77162+#include <linux/ip.h>
77163+#include <linux/skbuff.h>
77164+#include <linux/sysctl.h>
77165+
77166+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77167+void
77168+pax_set_initial_flags(struct linux_binprm *bprm)
77169+{
77170+ return;
77171+}
77172+#endif
77173+
77174+#ifdef CONFIG_SYSCTL
77175+__u32
77176+gr_handle_sysctl(const struct ctl_table * table, const int op)
77177+{
77178+ return 0;
77179+}
77180+#endif
77181+
77182+#ifdef CONFIG_TASKSTATS
77183+int gr_is_taskstats_denied(int pid)
77184+{
77185+ return 0;
77186+}
77187+#endif
77188+
77189+int
77190+gr_acl_is_enabled(void)
77191+{
77192+ return 0;
77193+}
77194+
77195+int
77196+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77197+{
77198+ return 0;
77199+}
77200+
77201+void
77202+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77203+{
77204+ return;
77205+}
77206+
77207+int
77208+gr_handle_rawio(const struct inode *inode)
77209+{
77210+ return 0;
77211+}
77212+
77213+void
77214+gr_acl_handle_psacct(struct task_struct *task, const long code)
77215+{
77216+ return;
77217+}
77218+
77219+int
77220+gr_handle_ptrace(struct task_struct *task, const long request)
77221+{
77222+ return 0;
77223+}
77224+
77225+int
77226+gr_handle_proc_ptrace(struct task_struct *task)
77227+{
77228+ return 0;
77229+}
77230+
77231+int
77232+gr_set_acls(const int type)
77233+{
77234+ return 0;
77235+}
77236+
77237+int
77238+gr_check_hidden_task(const struct task_struct *tsk)
77239+{
77240+ return 0;
77241+}
77242+
77243+int
77244+gr_check_protected_task(const struct task_struct *task)
77245+{
77246+ return 0;
77247+}
77248+
77249+int
77250+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77251+{
77252+ return 0;
77253+}
77254+
77255+void
77256+gr_copy_label(struct task_struct *tsk)
77257+{
77258+ return;
77259+}
77260+
77261+void
77262+gr_set_pax_flags(struct task_struct *task)
77263+{
77264+ return;
77265+}
77266+
77267+int
77268+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77269+ const int unsafe_share)
77270+{
77271+ return 0;
77272+}
77273+
77274+void
77275+gr_handle_delete(const ino_t ino, const dev_t dev)
77276+{
77277+ return;
77278+}
77279+
77280+void
77281+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77282+{
77283+ return;
77284+}
77285+
77286+void
77287+gr_handle_crash(struct task_struct *task, const int sig)
77288+{
77289+ return;
77290+}
77291+
77292+int
77293+gr_check_crash_exec(const struct file *filp)
77294+{
77295+ return 0;
77296+}
77297+
77298+int
77299+gr_check_crash_uid(const kuid_t uid)
77300+{
77301+ return 0;
77302+}
77303+
77304+void
77305+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77306+ struct dentry *old_dentry,
77307+ struct dentry *new_dentry,
77308+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77309+{
77310+ return;
77311+}
77312+
77313+int
77314+gr_search_socket(const int family, const int type, const int protocol)
77315+{
77316+ return 1;
77317+}
77318+
77319+int
77320+gr_search_connectbind(const int mode, const struct socket *sock,
77321+ const struct sockaddr_in *addr)
77322+{
77323+ return 0;
77324+}
77325+
77326+void
77327+gr_handle_alertkill(struct task_struct *task)
77328+{
77329+ return;
77330+}
77331+
77332+__u32
77333+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77334+{
77335+ return 1;
77336+}
77337+
77338+__u32
77339+gr_acl_handle_hidden_file(const struct dentry * dentry,
77340+ const struct vfsmount * mnt)
77341+{
77342+ return 1;
77343+}
77344+
77345+__u32
77346+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77347+ int acc_mode)
77348+{
77349+ return 1;
77350+}
77351+
77352+__u32
77353+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77354+{
77355+ return 1;
77356+}
77357+
77358+__u32
77359+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77360+{
77361+ return 1;
77362+}
77363+
77364+int
77365+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77366+ unsigned int *vm_flags)
77367+{
77368+ return 1;
77369+}
77370+
77371+__u32
77372+gr_acl_handle_truncate(const struct dentry * dentry,
77373+ const struct vfsmount * mnt)
77374+{
77375+ return 1;
77376+}
77377+
77378+__u32
77379+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77380+{
77381+ return 1;
77382+}
77383+
77384+__u32
77385+gr_acl_handle_access(const struct dentry * dentry,
77386+ const struct vfsmount * mnt, const int fmode)
77387+{
77388+ return 1;
77389+}
77390+
77391+__u32
77392+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77393+ umode_t *mode)
77394+{
77395+ return 1;
77396+}
77397+
77398+__u32
77399+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77400+{
77401+ return 1;
77402+}
77403+
77404+__u32
77405+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77406+{
77407+ return 1;
77408+}
77409+
77410+__u32
77411+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77412+{
77413+ return 1;
77414+}
77415+
77416+void
77417+grsecurity_init(void)
77418+{
77419+ return;
77420+}
77421+
77422+umode_t gr_acl_umask(void)
77423+{
77424+ return 0;
77425+}
77426+
77427+__u32
77428+gr_acl_handle_mknod(const struct dentry * new_dentry,
77429+ const struct dentry * parent_dentry,
77430+ const struct vfsmount * parent_mnt,
77431+ const int mode)
77432+{
77433+ return 1;
77434+}
77435+
77436+__u32
77437+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77438+ const struct dentry * parent_dentry,
77439+ const struct vfsmount * parent_mnt)
77440+{
77441+ return 1;
77442+}
77443+
77444+__u32
77445+gr_acl_handle_symlink(const struct dentry * new_dentry,
77446+ const struct dentry * parent_dentry,
77447+ const struct vfsmount * parent_mnt, const struct filename *from)
77448+{
77449+ return 1;
77450+}
77451+
77452+__u32
77453+gr_acl_handle_link(const struct dentry * new_dentry,
77454+ const struct dentry * parent_dentry,
77455+ const struct vfsmount * parent_mnt,
77456+ const struct dentry * old_dentry,
77457+ const struct vfsmount * old_mnt, const struct filename *to)
77458+{
77459+ return 1;
77460+}
77461+
77462+int
77463+gr_acl_handle_rename(const struct dentry *new_dentry,
77464+ const struct dentry *parent_dentry,
77465+ const struct vfsmount *parent_mnt,
77466+ const struct dentry *old_dentry,
77467+ const struct inode *old_parent_inode,
77468+ const struct vfsmount *old_mnt, const struct filename *newname,
77469+ unsigned int flags)
77470+{
77471+ return 0;
77472+}
77473+
77474+int
77475+gr_acl_handle_filldir(const struct file *file, const char *name,
77476+ const int namelen, const ino_t ino)
77477+{
77478+ return 1;
77479+}
77480+
77481+int
77482+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77483+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77484+{
77485+ return 1;
77486+}
77487+
77488+int
77489+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77490+{
77491+ return 0;
77492+}
77493+
77494+int
77495+gr_search_accept(const struct socket *sock)
77496+{
77497+ return 0;
77498+}
77499+
77500+int
77501+gr_search_listen(const struct socket *sock)
77502+{
77503+ return 0;
77504+}
77505+
77506+int
77507+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77508+{
77509+ return 0;
77510+}
77511+
77512+__u32
77513+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77514+{
77515+ return 1;
77516+}
77517+
77518+__u32
77519+gr_acl_handle_creat(const struct dentry * dentry,
77520+ const struct dentry * p_dentry,
77521+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77522+ const int imode)
77523+{
77524+ return 1;
77525+}
77526+
77527+void
77528+gr_acl_handle_exit(void)
77529+{
77530+ return;
77531+}
77532+
77533+int
77534+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77535+{
77536+ return 1;
77537+}
77538+
77539+void
77540+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77541+{
77542+ return;
77543+}
77544+
77545+int
77546+gr_acl_handle_procpidmem(const struct task_struct *task)
77547+{
77548+ return 0;
77549+}
77550+
77551+int
77552+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77553+{
77554+ return 0;
77555+}
77556+
77557+int
77558+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77559+{
77560+ return 0;
77561+}
77562+
77563+int
77564+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77565+{
77566+ return 0;
77567+}
77568+
77569+int
77570+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77571+{
77572+ return 0;
77573+}
77574+
77575+int gr_acl_enable_at_secure(void)
77576+{
77577+ return 0;
77578+}
77579+
77580+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77581+{
77582+ return dentry->d_sb->s_dev;
77583+}
77584+
77585+void gr_put_exec_file(struct task_struct *task)
77586+{
77587+ return;
77588+}
77589+
77590+#ifdef CONFIG_SECURITY
77591+EXPORT_SYMBOL_GPL(gr_check_user_change);
77592+EXPORT_SYMBOL_GPL(gr_check_group_change);
77593+#endif
77594diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77595new file mode 100644
77596index 0000000..14638ff
77597--- /dev/null
77598+++ b/grsecurity/grsec_exec.c
77599@@ -0,0 +1,188 @@
77600+#include <linux/kernel.h>
77601+#include <linux/sched.h>
77602+#include <linux/file.h>
77603+#include <linux/binfmts.h>
77604+#include <linux/fs.h>
77605+#include <linux/types.h>
77606+#include <linux/grdefs.h>
77607+#include <linux/grsecurity.h>
77608+#include <linux/grinternal.h>
77609+#include <linux/capability.h>
77610+#include <linux/module.h>
77611+#include <linux/compat.h>
77612+
77613+#include <asm/uaccess.h>
77614+
77615+#ifdef CONFIG_GRKERNSEC_EXECLOG
77616+static char gr_exec_arg_buf[132];
77617+static DEFINE_MUTEX(gr_exec_arg_mutex);
77618+#endif
77619+
77620+struct user_arg_ptr {
77621+#ifdef CONFIG_COMPAT
77622+ bool is_compat;
77623+#endif
77624+ union {
77625+ const char __user *const __user *native;
77626+#ifdef CONFIG_COMPAT
77627+ const compat_uptr_t __user *compat;
77628+#endif
77629+ } ptr;
77630+};
77631+
77632+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77633+
77634+void
77635+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77636+{
77637+#ifdef CONFIG_GRKERNSEC_EXECLOG
77638+ char *grarg = gr_exec_arg_buf;
77639+ unsigned int i, x, execlen = 0;
77640+ char c;
77641+
77642+ if (!((grsec_enable_execlog && grsec_enable_group &&
77643+ in_group_p(grsec_audit_gid))
77644+ || (grsec_enable_execlog && !grsec_enable_group)))
77645+ return;
77646+
77647+ mutex_lock(&gr_exec_arg_mutex);
77648+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77649+
77650+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77651+ const char __user *p;
77652+ unsigned int len;
77653+
77654+ p = get_user_arg_ptr(argv, i);
77655+ if (IS_ERR(p))
77656+ goto log;
77657+
77658+ len = strnlen_user(p, 128 - execlen);
77659+ if (len > 128 - execlen)
77660+ len = 128 - execlen;
77661+ else if (len > 0)
77662+ len--;
77663+ if (copy_from_user(grarg + execlen, p, len))
77664+ goto log;
77665+
77666+ /* rewrite unprintable characters */
77667+ for (x = 0; x < len; x++) {
77668+ c = *(grarg + execlen + x);
77669+ if (c < 32 || c > 126)
77670+ *(grarg + execlen + x) = ' ';
77671+ }
77672+
77673+ execlen += len;
77674+ *(grarg + execlen) = ' ';
77675+ *(grarg + execlen + 1) = '\0';
77676+ execlen++;
77677+ }
77678+
77679+ log:
77680+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77681+ bprm->file->f_path.mnt, grarg);
77682+ mutex_unlock(&gr_exec_arg_mutex);
77683+#endif
77684+ return;
77685+}
77686+
77687+#ifdef CONFIG_GRKERNSEC
77688+extern int gr_acl_is_capable(const int cap);
77689+extern int gr_acl_is_capable_nolog(const int cap);
77690+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77691+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77692+extern int gr_chroot_is_capable(const int cap);
77693+extern int gr_chroot_is_capable_nolog(const int cap);
77694+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77695+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77696+#endif
77697+
77698+const char *captab_log[] = {
77699+ "CAP_CHOWN",
77700+ "CAP_DAC_OVERRIDE",
77701+ "CAP_DAC_READ_SEARCH",
77702+ "CAP_FOWNER",
77703+ "CAP_FSETID",
77704+ "CAP_KILL",
77705+ "CAP_SETGID",
77706+ "CAP_SETUID",
77707+ "CAP_SETPCAP",
77708+ "CAP_LINUX_IMMUTABLE",
77709+ "CAP_NET_BIND_SERVICE",
77710+ "CAP_NET_BROADCAST",
77711+ "CAP_NET_ADMIN",
77712+ "CAP_NET_RAW",
77713+ "CAP_IPC_LOCK",
77714+ "CAP_IPC_OWNER",
77715+ "CAP_SYS_MODULE",
77716+ "CAP_SYS_RAWIO",
77717+ "CAP_SYS_CHROOT",
77718+ "CAP_SYS_PTRACE",
77719+ "CAP_SYS_PACCT",
77720+ "CAP_SYS_ADMIN",
77721+ "CAP_SYS_BOOT",
77722+ "CAP_SYS_NICE",
77723+ "CAP_SYS_RESOURCE",
77724+ "CAP_SYS_TIME",
77725+ "CAP_SYS_TTY_CONFIG",
77726+ "CAP_MKNOD",
77727+ "CAP_LEASE",
77728+ "CAP_AUDIT_WRITE",
77729+ "CAP_AUDIT_CONTROL",
77730+ "CAP_SETFCAP",
77731+ "CAP_MAC_OVERRIDE",
77732+ "CAP_MAC_ADMIN",
77733+ "CAP_SYSLOG",
77734+ "CAP_WAKE_ALARM",
77735+ "CAP_BLOCK_SUSPEND"
77736+};
77737+
77738+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77739+
77740+int gr_is_capable(const int cap)
77741+{
77742+#ifdef CONFIG_GRKERNSEC
77743+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77744+ return 1;
77745+ return 0;
77746+#else
77747+ return 1;
77748+#endif
77749+}
77750+
77751+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77752+{
77753+#ifdef CONFIG_GRKERNSEC
77754+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77755+ return 1;
77756+ return 0;
77757+#else
77758+ return 1;
77759+#endif
77760+}
77761+
77762+int gr_is_capable_nolog(const int cap)
77763+{
77764+#ifdef CONFIG_GRKERNSEC
77765+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77766+ return 1;
77767+ return 0;
77768+#else
77769+ return 1;
77770+#endif
77771+}
77772+
77773+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77774+{
77775+#ifdef CONFIG_GRKERNSEC
77776+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77777+ return 1;
77778+ return 0;
77779+#else
77780+ return 1;
77781+#endif
77782+}
77783+
77784+EXPORT_SYMBOL_GPL(gr_is_capable);
77785+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77786+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77787+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
77788diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77789new file mode 100644
77790index 0000000..06cc6ea
77791--- /dev/null
77792+++ b/grsecurity/grsec_fifo.c
77793@@ -0,0 +1,24 @@
77794+#include <linux/kernel.h>
77795+#include <linux/sched.h>
77796+#include <linux/fs.h>
77797+#include <linux/file.h>
77798+#include <linux/grinternal.h>
77799+
77800+int
77801+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77802+ const struct dentry *dir, const int flag, const int acc_mode)
77803+{
77804+#ifdef CONFIG_GRKERNSEC_FIFO
77805+ const struct cred *cred = current_cred();
77806+
77807+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77808+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77809+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77810+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77811+ if (!inode_permission(dentry->d_inode, acc_mode))
77812+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77813+ return -EACCES;
77814+ }
77815+#endif
77816+ return 0;
77817+}
77818diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77819new file mode 100644
77820index 0000000..8ca18bf
77821--- /dev/null
77822+++ b/grsecurity/grsec_fork.c
77823@@ -0,0 +1,23 @@
77824+#include <linux/kernel.h>
77825+#include <linux/sched.h>
77826+#include <linux/grsecurity.h>
77827+#include <linux/grinternal.h>
77828+#include <linux/errno.h>
77829+
77830+void
77831+gr_log_forkfail(const int retval)
77832+{
77833+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77834+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77835+ switch (retval) {
77836+ case -EAGAIN:
77837+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77838+ break;
77839+ case -ENOMEM:
77840+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77841+ break;
77842+ }
77843+ }
77844+#endif
77845+ return;
77846+}
77847diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77848new file mode 100644
77849index 0000000..b7cb191
77850--- /dev/null
77851+++ b/grsecurity/grsec_init.c
77852@@ -0,0 +1,286 @@
77853+#include <linux/kernel.h>
77854+#include <linux/sched.h>
77855+#include <linux/mm.h>
77856+#include <linux/gracl.h>
77857+#include <linux/slab.h>
77858+#include <linux/vmalloc.h>
77859+#include <linux/percpu.h>
77860+#include <linux/module.h>
77861+
77862+int grsec_enable_ptrace_readexec;
77863+int grsec_enable_setxid;
77864+int grsec_enable_symlinkown;
77865+kgid_t grsec_symlinkown_gid;
77866+int grsec_enable_brute;
77867+int grsec_enable_link;
77868+int grsec_enable_dmesg;
77869+int grsec_enable_harden_ptrace;
77870+int grsec_enable_harden_ipc;
77871+int grsec_enable_fifo;
77872+int grsec_enable_execlog;
77873+int grsec_enable_signal;
77874+int grsec_enable_forkfail;
77875+int grsec_enable_audit_ptrace;
77876+int grsec_enable_time;
77877+int grsec_enable_group;
77878+kgid_t grsec_audit_gid;
77879+int grsec_enable_chdir;
77880+int grsec_enable_mount;
77881+int grsec_enable_rofs;
77882+int grsec_deny_new_usb;
77883+int grsec_enable_chroot_findtask;
77884+int grsec_enable_chroot_mount;
77885+int grsec_enable_chroot_shmat;
77886+int grsec_enable_chroot_fchdir;
77887+int grsec_enable_chroot_double;
77888+int grsec_enable_chroot_pivot;
77889+int grsec_enable_chroot_chdir;
77890+int grsec_enable_chroot_chmod;
77891+int grsec_enable_chroot_mknod;
77892+int grsec_enable_chroot_nice;
77893+int grsec_enable_chroot_execlog;
77894+int grsec_enable_chroot_caps;
77895+int grsec_enable_chroot_sysctl;
77896+int grsec_enable_chroot_unix;
77897+int grsec_enable_tpe;
77898+kgid_t grsec_tpe_gid;
77899+int grsec_enable_blackhole;
77900+#ifdef CONFIG_IPV6_MODULE
77901+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77902+#endif
77903+int grsec_lastack_retries;
77904+int grsec_enable_tpe_all;
77905+int grsec_enable_tpe_invert;
77906+int grsec_enable_socket_all;
77907+kgid_t grsec_socket_all_gid;
77908+int grsec_enable_socket_client;
77909+kgid_t grsec_socket_client_gid;
77910+int grsec_enable_socket_server;
77911+kgid_t grsec_socket_server_gid;
77912+int grsec_resource_logging;
77913+int grsec_disable_privio;
77914+int grsec_enable_log_rwxmaps;
77915+int grsec_lock;
77916+
77917+DEFINE_SPINLOCK(grsec_alert_lock);
77918+unsigned long grsec_alert_wtime = 0;
77919+unsigned long grsec_alert_fyet = 0;
77920+
77921+DEFINE_SPINLOCK(grsec_audit_lock);
77922+
77923+DEFINE_RWLOCK(grsec_exec_file_lock);
77924+
77925+char *gr_shared_page[4];
77926+
77927+char *gr_alert_log_fmt;
77928+char *gr_audit_log_fmt;
77929+char *gr_alert_log_buf;
77930+char *gr_audit_log_buf;
77931+
77932+extern struct gr_arg *gr_usermode;
77933+extern unsigned char *gr_system_salt;
77934+extern unsigned char *gr_system_sum;
77935+
77936+void __init
77937+grsecurity_init(void)
77938+{
77939+ int j;
77940+ /* create the per-cpu shared pages */
77941+
77942+#ifdef CONFIG_X86
77943+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77944+#endif
77945+
77946+ for (j = 0; j < 4; j++) {
77947+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77948+ if (gr_shared_page[j] == NULL) {
77949+ panic("Unable to allocate grsecurity shared page");
77950+ return;
77951+ }
77952+ }
77953+
77954+ /* allocate log buffers */
77955+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77956+ if (!gr_alert_log_fmt) {
77957+ panic("Unable to allocate grsecurity alert log format buffer");
77958+ return;
77959+ }
77960+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77961+ if (!gr_audit_log_fmt) {
77962+ panic("Unable to allocate grsecurity audit log format buffer");
77963+ return;
77964+ }
77965+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77966+ if (!gr_alert_log_buf) {
77967+ panic("Unable to allocate grsecurity alert log buffer");
77968+ return;
77969+ }
77970+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77971+ if (!gr_audit_log_buf) {
77972+ panic("Unable to allocate grsecurity audit log buffer");
77973+ return;
77974+ }
77975+
77976+ /* allocate memory for authentication structure */
77977+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77978+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77979+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77980+
77981+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77982+ panic("Unable to allocate grsecurity authentication structure");
77983+ return;
77984+ }
77985+
77986+#ifdef CONFIG_GRKERNSEC_IO
77987+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77988+ grsec_disable_privio = 1;
77989+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77990+ grsec_disable_privio = 1;
77991+#else
77992+ grsec_disable_privio = 0;
77993+#endif
77994+#endif
77995+
77996+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77997+ /* for backward compatibility, tpe_invert always defaults to on if
77998+ enabled in the kernel
77999+ */
78000+ grsec_enable_tpe_invert = 1;
78001+#endif
78002+
78003+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78004+#ifndef CONFIG_GRKERNSEC_SYSCTL
78005+ grsec_lock = 1;
78006+#endif
78007+
78008+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78009+ grsec_enable_log_rwxmaps = 1;
78010+#endif
78011+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78012+ grsec_enable_group = 1;
78013+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78014+#endif
78015+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78016+ grsec_enable_ptrace_readexec = 1;
78017+#endif
78018+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78019+ grsec_enable_chdir = 1;
78020+#endif
78021+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78022+ grsec_enable_harden_ptrace = 1;
78023+#endif
78024+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78025+ grsec_enable_harden_ipc = 1;
78026+#endif
78027+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78028+ grsec_enable_mount = 1;
78029+#endif
78030+#ifdef CONFIG_GRKERNSEC_LINK
78031+ grsec_enable_link = 1;
78032+#endif
78033+#ifdef CONFIG_GRKERNSEC_BRUTE
78034+ grsec_enable_brute = 1;
78035+#endif
78036+#ifdef CONFIG_GRKERNSEC_DMESG
78037+ grsec_enable_dmesg = 1;
78038+#endif
78039+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78040+ grsec_enable_blackhole = 1;
78041+ grsec_lastack_retries = 4;
78042+#endif
78043+#ifdef CONFIG_GRKERNSEC_FIFO
78044+ grsec_enable_fifo = 1;
78045+#endif
78046+#ifdef CONFIG_GRKERNSEC_EXECLOG
78047+ grsec_enable_execlog = 1;
78048+#endif
78049+#ifdef CONFIG_GRKERNSEC_SETXID
78050+ grsec_enable_setxid = 1;
78051+#endif
78052+#ifdef CONFIG_GRKERNSEC_SIGNAL
78053+ grsec_enable_signal = 1;
78054+#endif
78055+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78056+ grsec_enable_forkfail = 1;
78057+#endif
78058+#ifdef CONFIG_GRKERNSEC_TIME
78059+ grsec_enable_time = 1;
78060+#endif
78061+#ifdef CONFIG_GRKERNSEC_RESLOG
78062+ grsec_resource_logging = 1;
78063+#endif
78064+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78065+ grsec_enable_chroot_findtask = 1;
78066+#endif
78067+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78068+ grsec_enable_chroot_unix = 1;
78069+#endif
78070+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78071+ grsec_enable_chroot_mount = 1;
78072+#endif
78073+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78074+ grsec_enable_chroot_fchdir = 1;
78075+#endif
78076+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78077+ grsec_enable_chroot_shmat = 1;
78078+#endif
78079+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78080+ grsec_enable_audit_ptrace = 1;
78081+#endif
78082+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78083+ grsec_enable_chroot_double = 1;
78084+#endif
78085+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78086+ grsec_enable_chroot_pivot = 1;
78087+#endif
78088+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78089+ grsec_enable_chroot_chdir = 1;
78090+#endif
78091+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78092+ grsec_enable_chroot_chmod = 1;
78093+#endif
78094+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78095+ grsec_enable_chroot_mknod = 1;
78096+#endif
78097+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78098+ grsec_enable_chroot_nice = 1;
78099+#endif
78100+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78101+ grsec_enable_chroot_execlog = 1;
78102+#endif
78103+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78104+ grsec_enable_chroot_caps = 1;
78105+#endif
78106+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78107+ grsec_enable_chroot_sysctl = 1;
78108+#endif
78109+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78110+ grsec_enable_symlinkown = 1;
78111+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78112+#endif
78113+#ifdef CONFIG_GRKERNSEC_TPE
78114+ grsec_enable_tpe = 1;
78115+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78116+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78117+ grsec_enable_tpe_all = 1;
78118+#endif
78119+#endif
78120+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78121+ grsec_enable_socket_all = 1;
78122+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78123+#endif
78124+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78125+ grsec_enable_socket_client = 1;
78126+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78127+#endif
78128+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78129+ grsec_enable_socket_server = 1;
78130+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78131+#endif
78132+#endif
78133+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78134+ grsec_deny_new_usb = 1;
78135+#endif
78136+
78137+ return;
78138+}
78139diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78140new file mode 100644
78141index 0000000..1773300
78142--- /dev/null
78143+++ b/grsecurity/grsec_ipc.c
78144@@ -0,0 +1,48 @@
78145+#include <linux/kernel.h>
78146+#include <linux/mm.h>
78147+#include <linux/sched.h>
78148+#include <linux/file.h>
78149+#include <linux/ipc.h>
78150+#include <linux/ipc_namespace.h>
78151+#include <linux/grsecurity.h>
78152+#include <linux/grinternal.h>
78153+
78154+int
78155+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78156+{
78157+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78158+ int write;
78159+ int orig_granted_mode;
78160+ kuid_t euid;
78161+ kgid_t egid;
78162+
78163+ if (!grsec_enable_harden_ipc)
78164+ return 1;
78165+
78166+ euid = current_euid();
78167+ egid = current_egid();
78168+
78169+ write = requested_mode & 00002;
78170+ orig_granted_mode = ipcp->mode;
78171+
78172+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78173+ orig_granted_mode >>= 6;
78174+ else {
78175+ /* if likely wrong permissions, lock to user */
78176+ if (orig_granted_mode & 0007)
78177+ orig_granted_mode = 0;
78178+ /* otherwise do a egid-only check */
78179+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78180+ orig_granted_mode >>= 3;
78181+ /* otherwise, no access */
78182+ else
78183+ orig_granted_mode = 0;
78184+ }
78185+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78186+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78187+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78188+ return 0;
78189+ }
78190+#endif
78191+ return 1;
78192+}
78193diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78194new file mode 100644
78195index 0000000..5e05e20
78196--- /dev/null
78197+++ b/grsecurity/grsec_link.c
78198@@ -0,0 +1,58 @@
78199+#include <linux/kernel.h>
78200+#include <linux/sched.h>
78201+#include <linux/fs.h>
78202+#include <linux/file.h>
78203+#include <linux/grinternal.h>
78204+
78205+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78206+{
78207+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78208+ const struct inode *link_inode = link->dentry->d_inode;
78209+
78210+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78211+ /* ignore root-owned links, e.g. /proc/self */
78212+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78213+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78214+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78215+ return 1;
78216+ }
78217+#endif
78218+ return 0;
78219+}
78220+
78221+int
78222+gr_handle_follow_link(const struct inode *parent,
78223+ const struct inode *inode,
78224+ const struct dentry *dentry, const struct vfsmount *mnt)
78225+{
78226+#ifdef CONFIG_GRKERNSEC_LINK
78227+ const struct cred *cred = current_cred();
78228+
78229+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78230+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78231+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78232+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78233+ return -EACCES;
78234+ }
78235+#endif
78236+ return 0;
78237+}
78238+
78239+int
78240+gr_handle_hardlink(const struct dentry *dentry,
78241+ const struct vfsmount *mnt,
78242+ struct inode *inode, const int mode, const struct filename *to)
78243+{
78244+#ifdef CONFIG_GRKERNSEC_LINK
78245+ const struct cred *cred = current_cred();
78246+
78247+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78248+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78249+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78250+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78251+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78252+ return -EPERM;
78253+ }
78254+#endif
78255+ return 0;
78256+}
78257diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78258new file mode 100644
78259index 0000000..dbe0a6b
78260--- /dev/null
78261+++ b/grsecurity/grsec_log.c
78262@@ -0,0 +1,341 @@
78263+#include <linux/kernel.h>
78264+#include <linux/sched.h>
78265+#include <linux/file.h>
78266+#include <linux/tty.h>
78267+#include <linux/fs.h>
78268+#include <linux/mm.h>
78269+#include <linux/grinternal.h>
78270+
78271+#ifdef CONFIG_TREE_PREEMPT_RCU
78272+#define DISABLE_PREEMPT() preempt_disable()
78273+#define ENABLE_PREEMPT() preempt_enable()
78274+#else
78275+#define DISABLE_PREEMPT()
78276+#define ENABLE_PREEMPT()
78277+#endif
78278+
78279+#define BEGIN_LOCKS(x) \
78280+ DISABLE_PREEMPT(); \
78281+ rcu_read_lock(); \
78282+ read_lock(&tasklist_lock); \
78283+ read_lock(&grsec_exec_file_lock); \
78284+ if (x != GR_DO_AUDIT) \
78285+ spin_lock(&grsec_alert_lock); \
78286+ else \
78287+ spin_lock(&grsec_audit_lock)
78288+
78289+#define END_LOCKS(x) \
78290+ if (x != GR_DO_AUDIT) \
78291+ spin_unlock(&grsec_alert_lock); \
78292+ else \
78293+ spin_unlock(&grsec_audit_lock); \
78294+ read_unlock(&grsec_exec_file_lock); \
78295+ read_unlock(&tasklist_lock); \
78296+ rcu_read_unlock(); \
78297+ ENABLE_PREEMPT(); \
78298+ if (x == GR_DONT_AUDIT) \
78299+ gr_handle_alertkill(current)
78300+
78301+enum {
78302+ FLOODING,
78303+ NO_FLOODING
78304+};
78305+
78306+extern char *gr_alert_log_fmt;
78307+extern char *gr_audit_log_fmt;
78308+extern char *gr_alert_log_buf;
78309+extern char *gr_audit_log_buf;
78310+
78311+static int gr_log_start(int audit)
78312+{
78313+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78314+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78315+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78316+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78317+ unsigned long curr_secs = get_seconds();
78318+
78319+ if (audit == GR_DO_AUDIT)
78320+ goto set_fmt;
78321+
78322+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78323+ grsec_alert_wtime = curr_secs;
78324+ grsec_alert_fyet = 0;
78325+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78326+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78327+ grsec_alert_fyet++;
78328+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78329+ grsec_alert_wtime = curr_secs;
78330+ grsec_alert_fyet++;
78331+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78332+ return FLOODING;
78333+ }
78334+ else return FLOODING;
78335+
78336+set_fmt:
78337+#endif
78338+ memset(buf, 0, PAGE_SIZE);
78339+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78340+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78341+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78342+ } else if (current->signal->curr_ip) {
78343+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78344+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78345+ } else if (gr_acl_is_enabled()) {
78346+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78347+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78348+ } else {
78349+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78350+ strcpy(buf, fmt);
78351+ }
78352+
78353+ return NO_FLOODING;
78354+}
78355+
78356+static void gr_log_middle(int audit, const char *msg, va_list ap)
78357+ __attribute__ ((format (printf, 2, 0)));
78358+
78359+static void gr_log_middle(int audit, const char *msg, va_list ap)
78360+{
78361+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78362+ unsigned int len = strlen(buf);
78363+
78364+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78365+
78366+ return;
78367+}
78368+
78369+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78370+ __attribute__ ((format (printf, 2, 3)));
78371+
78372+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78373+{
78374+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78375+ unsigned int len = strlen(buf);
78376+ va_list ap;
78377+
78378+ va_start(ap, msg);
78379+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78380+ va_end(ap);
78381+
78382+ return;
78383+}
78384+
78385+static void gr_log_end(int audit, int append_default)
78386+{
78387+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78388+ if (append_default) {
78389+ struct task_struct *task = current;
78390+ struct task_struct *parent = task->real_parent;
78391+ const struct cred *cred = __task_cred(task);
78392+ const struct cred *pcred = __task_cred(parent);
78393+ unsigned int len = strlen(buf);
78394+
78395+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78396+ }
78397+
78398+ printk("%s\n", buf);
78399+
78400+ return;
78401+}
78402+
78403+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78404+{
78405+ int logtype;
78406+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78407+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78408+ void *voidptr = NULL;
78409+ int num1 = 0, num2 = 0;
78410+ unsigned long ulong1 = 0, ulong2 = 0;
78411+ struct dentry *dentry = NULL;
78412+ struct vfsmount *mnt = NULL;
78413+ struct file *file = NULL;
78414+ struct task_struct *task = NULL;
78415+ struct vm_area_struct *vma = NULL;
78416+ const struct cred *cred, *pcred;
78417+ va_list ap;
78418+
78419+ BEGIN_LOCKS(audit);
78420+ logtype = gr_log_start(audit);
78421+ if (logtype == FLOODING) {
78422+ END_LOCKS(audit);
78423+ return;
78424+ }
78425+ va_start(ap, argtypes);
78426+ switch (argtypes) {
78427+ case GR_TTYSNIFF:
78428+ task = va_arg(ap, struct task_struct *);
78429+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78430+ break;
78431+ case GR_SYSCTL_HIDDEN:
78432+ str1 = va_arg(ap, char *);
78433+ gr_log_middle_varargs(audit, msg, result, str1);
78434+ break;
78435+ case GR_RBAC:
78436+ dentry = va_arg(ap, struct dentry *);
78437+ mnt = va_arg(ap, struct vfsmount *);
78438+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78439+ break;
78440+ case GR_RBAC_STR:
78441+ dentry = va_arg(ap, struct dentry *);
78442+ mnt = va_arg(ap, struct vfsmount *);
78443+ str1 = va_arg(ap, char *);
78444+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78445+ break;
78446+ case GR_STR_RBAC:
78447+ str1 = va_arg(ap, char *);
78448+ dentry = va_arg(ap, struct dentry *);
78449+ mnt = va_arg(ap, struct vfsmount *);
78450+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78451+ break;
78452+ case GR_RBAC_MODE2:
78453+ dentry = va_arg(ap, struct dentry *);
78454+ mnt = va_arg(ap, struct vfsmount *);
78455+ str1 = va_arg(ap, char *);
78456+ str2 = va_arg(ap, char *);
78457+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78458+ break;
78459+ case GR_RBAC_MODE3:
78460+ dentry = va_arg(ap, struct dentry *);
78461+ mnt = va_arg(ap, struct vfsmount *);
78462+ str1 = va_arg(ap, char *);
78463+ str2 = va_arg(ap, char *);
78464+ str3 = va_arg(ap, char *);
78465+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78466+ break;
78467+ case GR_FILENAME:
78468+ dentry = va_arg(ap, struct dentry *);
78469+ mnt = va_arg(ap, struct vfsmount *);
78470+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78471+ break;
78472+ case GR_STR_FILENAME:
78473+ str1 = va_arg(ap, char *);
78474+ dentry = va_arg(ap, struct dentry *);
78475+ mnt = va_arg(ap, struct vfsmount *);
78476+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78477+ break;
78478+ case GR_FILENAME_STR:
78479+ dentry = va_arg(ap, struct dentry *);
78480+ mnt = va_arg(ap, struct vfsmount *);
78481+ str1 = va_arg(ap, char *);
78482+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78483+ break;
78484+ case GR_FILENAME_TWO_INT:
78485+ dentry = va_arg(ap, struct dentry *);
78486+ mnt = va_arg(ap, struct vfsmount *);
78487+ num1 = va_arg(ap, int);
78488+ num2 = va_arg(ap, int);
78489+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78490+ break;
78491+ case GR_FILENAME_TWO_INT_STR:
78492+ dentry = va_arg(ap, struct dentry *);
78493+ mnt = va_arg(ap, struct vfsmount *);
78494+ num1 = va_arg(ap, int);
78495+ num2 = va_arg(ap, int);
78496+ str1 = va_arg(ap, char *);
78497+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78498+ break;
78499+ case GR_TEXTREL:
78500+ file = va_arg(ap, struct file *);
78501+ ulong1 = va_arg(ap, unsigned long);
78502+ ulong2 = va_arg(ap, unsigned long);
78503+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78504+ break;
78505+ case GR_PTRACE:
78506+ task = va_arg(ap, struct task_struct *);
78507+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78508+ break;
78509+ case GR_RESOURCE:
78510+ task = va_arg(ap, struct task_struct *);
78511+ cred = __task_cred(task);
78512+ pcred = __task_cred(task->real_parent);
78513+ ulong1 = va_arg(ap, unsigned long);
78514+ str1 = va_arg(ap, char *);
78515+ ulong2 = va_arg(ap, unsigned long);
78516+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78517+ break;
78518+ case GR_CAP:
78519+ task = va_arg(ap, struct task_struct *);
78520+ cred = __task_cred(task);
78521+ pcred = __task_cred(task->real_parent);
78522+ str1 = va_arg(ap, char *);
78523+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78524+ break;
78525+ case GR_SIG:
78526+ str1 = va_arg(ap, char *);
78527+ voidptr = va_arg(ap, void *);
78528+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78529+ break;
78530+ case GR_SIG2:
78531+ task = va_arg(ap, struct task_struct *);
78532+ cred = __task_cred(task);
78533+ pcred = __task_cred(task->real_parent);
78534+ num1 = va_arg(ap, int);
78535+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78536+ break;
78537+ case GR_CRASH1:
78538+ task = va_arg(ap, struct task_struct *);
78539+ cred = __task_cred(task);
78540+ pcred = __task_cred(task->real_parent);
78541+ ulong1 = va_arg(ap, unsigned long);
78542+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78543+ break;
78544+ case GR_CRASH2:
78545+ task = va_arg(ap, struct task_struct *);
78546+ cred = __task_cred(task);
78547+ pcred = __task_cred(task->real_parent);
78548+ ulong1 = va_arg(ap, unsigned long);
78549+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78550+ break;
78551+ case GR_RWXMAP:
78552+ file = va_arg(ap, struct file *);
78553+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78554+ break;
78555+ case GR_RWXMAPVMA:
78556+ vma = va_arg(ap, struct vm_area_struct *);
78557+ if (vma->vm_file)
78558+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78559+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78560+ str1 = "<stack>";
78561+ else if (vma->vm_start <= current->mm->brk &&
78562+ vma->vm_end >= current->mm->start_brk)
78563+ str1 = "<heap>";
78564+ else
78565+ str1 = "<anonymous mapping>";
78566+ gr_log_middle_varargs(audit, msg, str1);
78567+ break;
78568+ case GR_PSACCT:
78569+ {
78570+ unsigned int wday, cday;
78571+ __u8 whr, chr;
78572+ __u8 wmin, cmin;
78573+ __u8 wsec, csec;
78574+ char cur_tty[64] = { 0 };
78575+ char parent_tty[64] = { 0 };
78576+
78577+ task = va_arg(ap, struct task_struct *);
78578+ wday = va_arg(ap, unsigned int);
78579+ cday = va_arg(ap, unsigned int);
78580+ whr = va_arg(ap, int);
78581+ chr = va_arg(ap, int);
78582+ wmin = va_arg(ap, int);
78583+ cmin = va_arg(ap, int);
78584+ wsec = va_arg(ap, int);
78585+ csec = va_arg(ap, int);
78586+ ulong1 = va_arg(ap, unsigned long);
78587+ cred = __task_cred(task);
78588+ pcred = __task_cred(task->real_parent);
78589+
78590+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78591+ }
78592+ break;
78593+ default:
78594+ gr_log_middle(audit, msg, ap);
78595+ }
78596+ va_end(ap);
78597+ // these don't need DEFAULTSECARGS printed on the end
78598+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78599+ gr_log_end(audit, 0);
78600+ else
78601+ gr_log_end(audit, 1);
78602+ END_LOCKS(audit);
78603+}
78604diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78605new file mode 100644
78606index 0000000..0e39d8c
78607--- /dev/null
78608+++ b/grsecurity/grsec_mem.c
78609@@ -0,0 +1,48 @@
78610+#include <linux/kernel.h>
78611+#include <linux/sched.h>
78612+#include <linux/mm.h>
78613+#include <linux/mman.h>
78614+#include <linux/module.h>
78615+#include <linux/grinternal.h>
78616+
78617+void gr_handle_msr_write(void)
78618+{
78619+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78620+ return;
78621+}
78622+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78623+
78624+void
78625+gr_handle_ioperm(void)
78626+{
78627+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78628+ return;
78629+}
78630+
78631+void
78632+gr_handle_iopl(void)
78633+{
78634+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78635+ return;
78636+}
78637+
78638+void
78639+gr_handle_mem_readwrite(u64 from, u64 to)
78640+{
78641+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78642+ return;
78643+}
78644+
78645+void
78646+gr_handle_vm86(void)
78647+{
78648+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78649+ return;
78650+}
78651+
78652+void
78653+gr_log_badprocpid(const char *entry)
78654+{
78655+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78656+ return;
78657+}
78658diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78659new file mode 100644
78660index 0000000..cd9e124
78661--- /dev/null
78662+++ b/grsecurity/grsec_mount.c
78663@@ -0,0 +1,65 @@
78664+#include <linux/kernel.h>
78665+#include <linux/sched.h>
78666+#include <linux/mount.h>
78667+#include <linux/major.h>
78668+#include <linux/grsecurity.h>
78669+#include <linux/grinternal.h>
78670+
78671+void
78672+gr_log_remount(const char *devname, const int retval)
78673+{
78674+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78675+ if (grsec_enable_mount && (retval >= 0))
78676+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78677+#endif
78678+ return;
78679+}
78680+
78681+void
78682+gr_log_unmount(const char *devname, const int retval)
78683+{
78684+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78685+ if (grsec_enable_mount && (retval >= 0))
78686+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78687+#endif
78688+ return;
78689+}
78690+
78691+void
78692+gr_log_mount(const char *from, const char *to, const int retval)
78693+{
78694+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78695+ if (grsec_enable_mount && (retval >= 0))
78696+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
78697+#endif
78698+ return;
78699+}
78700+
78701+int
78702+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78703+{
78704+#ifdef CONFIG_GRKERNSEC_ROFS
78705+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78706+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78707+ return -EPERM;
78708+ } else
78709+ return 0;
78710+#endif
78711+ return 0;
78712+}
78713+
78714+int
78715+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78716+{
78717+#ifdef CONFIG_GRKERNSEC_ROFS
78718+ struct inode *inode = dentry->d_inode;
78719+
78720+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78721+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78722+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78723+ return -EPERM;
78724+ } else
78725+ return 0;
78726+#endif
78727+ return 0;
78728+}
78729diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78730new file mode 100644
78731index 0000000..6ee9d50
78732--- /dev/null
78733+++ b/grsecurity/grsec_pax.c
78734@@ -0,0 +1,45 @@
78735+#include <linux/kernel.h>
78736+#include <linux/sched.h>
78737+#include <linux/mm.h>
78738+#include <linux/file.h>
78739+#include <linux/grinternal.h>
78740+#include <linux/grsecurity.h>
78741+
78742+void
78743+gr_log_textrel(struct vm_area_struct * vma)
78744+{
78745+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78746+ if (grsec_enable_log_rwxmaps)
78747+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78748+#endif
78749+ return;
78750+}
78751+
78752+void gr_log_ptgnustack(struct file *file)
78753+{
78754+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78755+ if (grsec_enable_log_rwxmaps)
78756+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78757+#endif
78758+ return;
78759+}
78760+
78761+void
78762+gr_log_rwxmmap(struct file *file)
78763+{
78764+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78765+ if (grsec_enable_log_rwxmaps)
78766+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78767+#endif
78768+ return;
78769+}
78770+
78771+void
78772+gr_log_rwxmprotect(struct vm_area_struct *vma)
78773+{
78774+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78775+ if (grsec_enable_log_rwxmaps)
78776+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78777+#endif
78778+ return;
78779+}
78780diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78781new file mode 100644
78782index 0000000..2005a3a
78783--- /dev/null
78784+++ b/grsecurity/grsec_proc.c
78785@@ -0,0 +1,20 @@
78786+#include <linux/kernel.h>
78787+#include <linux/sched.h>
78788+#include <linux/grsecurity.h>
78789+#include <linux/grinternal.h>
78790+
78791+int gr_proc_is_restricted(void)
78792+{
78793+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78794+ const struct cred *cred = current_cred();
78795+#endif
78796+
78797+#ifdef CONFIG_GRKERNSEC_PROC_USER
78798+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78799+ return -EACCES;
78800+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78801+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78802+ return -EACCES;
78803+#endif
78804+ return 0;
78805+}
78806diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78807new file mode 100644
78808index 0000000..f7f29aa
78809--- /dev/null
78810+++ b/grsecurity/grsec_ptrace.c
78811@@ -0,0 +1,30 @@
78812+#include <linux/kernel.h>
78813+#include <linux/sched.h>
78814+#include <linux/grinternal.h>
78815+#include <linux/security.h>
78816+
78817+void
78818+gr_audit_ptrace(struct task_struct *task)
78819+{
78820+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78821+ if (grsec_enable_audit_ptrace)
78822+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78823+#endif
78824+ return;
78825+}
78826+
78827+int
78828+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78829+{
78830+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78831+ const struct dentry *dentry = file->f_path.dentry;
78832+ const struct vfsmount *mnt = file->f_path.mnt;
78833+
78834+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78835+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78836+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78837+ return -EACCES;
78838+ }
78839+#endif
78840+ return 0;
78841+}
78842diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78843new file mode 100644
78844index 0000000..3860c7e
78845--- /dev/null
78846+++ b/grsecurity/grsec_sig.c
78847@@ -0,0 +1,236 @@
78848+#include <linux/kernel.h>
78849+#include <linux/sched.h>
78850+#include <linux/fs.h>
78851+#include <linux/delay.h>
78852+#include <linux/grsecurity.h>
78853+#include <linux/grinternal.h>
78854+#include <linux/hardirq.h>
78855+
78856+char *signames[] = {
78857+ [SIGSEGV] = "Segmentation fault",
78858+ [SIGILL] = "Illegal instruction",
78859+ [SIGABRT] = "Abort",
78860+ [SIGBUS] = "Invalid alignment/Bus error"
78861+};
78862+
78863+void
78864+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78865+{
78866+#ifdef CONFIG_GRKERNSEC_SIGNAL
78867+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78868+ (sig == SIGABRT) || (sig == SIGBUS))) {
78869+ if (task_pid_nr(t) == task_pid_nr(current)) {
78870+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78871+ } else {
78872+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78873+ }
78874+ }
78875+#endif
78876+ return;
78877+}
78878+
78879+int
78880+gr_handle_signal(const struct task_struct *p, const int sig)
78881+{
78882+#ifdef CONFIG_GRKERNSEC
78883+ /* ignore the 0 signal for protected task checks */
78884+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78885+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78886+ return -EPERM;
78887+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78888+ return -EPERM;
78889+ }
78890+#endif
78891+ return 0;
78892+}
78893+
78894+#ifdef CONFIG_GRKERNSEC
78895+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78896+
78897+int gr_fake_force_sig(int sig, struct task_struct *t)
78898+{
78899+ unsigned long int flags;
78900+ int ret, blocked, ignored;
78901+ struct k_sigaction *action;
78902+
78903+ spin_lock_irqsave(&t->sighand->siglock, flags);
78904+ action = &t->sighand->action[sig-1];
78905+ ignored = action->sa.sa_handler == SIG_IGN;
78906+ blocked = sigismember(&t->blocked, sig);
78907+ if (blocked || ignored) {
78908+ action->sa.sa_handler = SIG_DFL;
78909+ if (blocked) {
78910+ sigdelset(&t->blocked, sig);
78911+ recalc_sigpending_and_wake(t);
78912+ }
78913+ }
78914+ if (action->sa.sa_handler == SIG_DFL)
78915+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78916+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78917+
78918+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78919+
78920+ return ret;
78921+}
78922+#endif
78923+
78924+#define GR_USER_BAN_TIME (15 * 60)
78925+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78926+
78927+void gr_handle_brute_attach(int dumpable)
78928+{
78929+#ifdef CONFIG_GRKERNSEC_BRUTE
78930+ struct task_struct *p = current;
78931+ kuid_t uid = GLOBAL_ROOT_UID;
78932+ int daemon = 0;
78933+
78934+ if (!grsec_enable_brute)
78935+ return;
78936+
78937+ rcu_read_lock();
78938+ read_lock(&tasklist_lock);
78939+ read_lock(&grsec_exec_file_lock);
78940+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78941+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78942+ p->real_parent->brute = 1;
78943+ daemon = 1;
78944+ } else {
78945+ const struct cred *cred = __task_cred(p), *cred2;
78946+ struct task_struct *tsk, *tsk2;
78947+
78948+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78949+ struct user_struct *user;
78950+
78951+ uid = cred->uid;
78952+
78953+ /* this is put upon execution past expiration */
78954+ user = find_user(uid);
78955+ if (user == NULL)
78956+ goto unlock;
78957+ user->suid_banned = 1;
78958+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78959+ if (user->suid_ban_expires == ~0UL)
78960+ user->suid_ban_expires--;
78961+
78962+ /* only kill other threads of the same binary, from the same user */
78963+ do_each_thread(tsk2, tsk) {
78964+ cred2 = __task_cred(tsk);
78965+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78966+ gr_fake_force_sig(SIGKILL, tsk);
78967+ } while_each_thread(tsk2, tsk);
78968+ }
78969+ }
78970+unlock:
78971+ read_unlock(&grsec_exec_file_lock);
78972+ read_unlock(&tasklist_lock);
78973+ rcu_read_unlock();
78974+
78975+ if (gr_is_global_nonroot(uid))
78976+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78977+ else if (daemon)
78978+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78979+
78980+#endif
78981+ return;
78982+}
78983+
78984+void gr_handle_brute_check(void)
78985+{
78986+#ifdef CONFIG_GRKERNSEC_BRUTE
78987+ struct task_struct *p = current;
78988+
78989+ if (unlikely(p->brute)) {
78990+ if (!grsec_enable_brute)
78991+ p->brute = 0;
78992+ else if (time_before(get_seconds(), p->brute_expires))
78993+ msleep(30 * 1000);
78994+ }
78995+#endif
78996+ return;
78997+}
78998+
78999+void gr_handle_kernel_exploit(void)
79000+{
79001+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79002+ const struct cred *cred;
79003+ struct task_struct *tsk, *tsk2;
79004+ struct user_struct *user;
79005+ kuid_t uid;
79006+
79007+ if (in_irq() || in_serving_softirq() || in_nmi())
79008+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79009+
79010+ uid = current_uid();
79011+
79012+ if (gr_is_global_root(uid))
79013+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79014+ else {
79015+ /* kill all the processes of this user, hold a reference
79016+ to their creds struct, and prevent them from creating
79017+ another process until system reset
79018+ */
79019+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79020+ GR_GLOBAL_UID(uid));
79021+ /* we intentionally leak this ref */
79022+ user = get_uid(current->cred->user);
79023+ if (user)
79024+ user->kernel_banned = 1;
79025+
79026+ /* kill all processes of this user */
79027+ read_lock(&tasklist_lock);
79028+ do_each_thread(tsk2, tsk) {
79029+ cred = __task_cred(tsk);
79030+ if (uid_eq(cred->uid, uid))
79031+ gr_fake_force_sig(SIGKILL, tsk);
79032+ } while_each_thread(tsk2, tsk);
79033+ read_unlock(&tasklist_lock);
79034+ }
79035+#endif
79036+}
79037+
79038+#ifdef CONFIG_GRKERNSEC_BRUTE
79039+static bool suid_ban_expired(struct user_struct *user)
79040+{
79041+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79042+ user->suid_banned = 0;
79043+ user->suid_ban_expires = 0;
79044+ free_uid(user);
79045+ return true;
79046+ }
79047+
79048+ return false;
79049+}
79050+#endif
79051+
79052+int gr_process_kernel_exec_ban(void)
79053+{
79054+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79055+ if (unlikely(current->cred->user->kernel_banned))
79056+ return -EPERM;
79057+#endif
79058+ return 0;
79059+}
79060+
79061+int gr_process_kernel_setuid_ban(struct user_struct *user)
79062+{
79063+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79064+ if (unlikely(user->kernel_banned))
79065+ gr_fake_force_sig(SIGKILL, current);
79066+#endif
79067+ return 0;
79068+}
79069+
79070+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79071+{
79072+#ifdef CONFIG_GRKERNSEC_BRUTE
79073+ struct user_struct *user = current->cred->user;
79074+ if (unlikely(user->suid_banned)) {
79075+ if (suid_ban_expired(user))
79076+ return 0;
79077+ /* disallow execution of suid binaries only */
79078+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79079+ return -EPERM;
79080+ }
79081+#endif
79082+ return 0;
79083+}
79084diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79085new file mode 100644
79086index 0000000..e3650b6
79087--- /dev/null
79088+++ b/grsecurity/grsec_sock.c
79089@@ -0,0 +1,244 @@
79090+#include <linux/kernel.h>
79091+#include <linux/module.h>
79092+#include <linux/sched.h>
79093+#include <linux/file.h>
79094+#include <linux/net.h>
79095+#include <linux/in.h>
79096+#include <linux/ip.h>
79097+#include <net/sock.h>
79098+#include <net/inet_sock.h>
79099+#include <linux/grsecurity.h>
79100+#include <linux/grinternal.h>
79101+#include <linux/gracl.h>
79102+
79103+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79104+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79105+
79106+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79107+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79108+
79109+#ifdef CONFIG_UNIX_MODULE
79110+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79111+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79112+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79113+EXPORT_SYMBOL_GPL(gr_handle_create);
79114+#endif
79115+
79116+#ifdef CONFIG_GRKERNSEC
79117+#define gr_conn_table_size 32749
79118+struct conn_table_entry {
79119+ struct conn_table_entry *next;
79120+ struct signal_struct *sig;
79121+};
79122+
79123+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79124+DEFINE_SPINLOCK(gr_conn_table_lock);
79125+
79126+extern const char * gr_socktype_to_name(unsigned char type);
79127+extern const char * gr_proto_to_name(unsigned char proto);
79128+extern const char * gr_sockfamily_to_name(unsigned char family);
79129+
79130+static __inline__ int
79131+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79132+{
79133+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79134+}
79135+
79136+static __inline__ int
79137+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79138+ __u16 sport, __u16 dport)
79139+{
79140+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79141+ sig->gr_sport == sport && sig->gr_dport == dport))
79142+ return 1;
79143+ else
79144+ return 0;
79145+}
79146+
79147+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79148+{
79149+ struct conn_table_entry **match;
79150+ unsigned int index;
79151+
79152+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79153+ sig->gr_sport, sig->gr_dport,
79154+ gr_conn_table_size);
79155+
79156+ newent->sig = sig;
79157+
79158+ match = &gr_conn_table[index];
79159+ newent->next = *match;
79160+ *match = newent;
79161+
79162+ return;
79163+}
79164+
79165+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79166+{
79167+ struct conn_table_entry *match, *last = NULL;
79168+ unsigned int index;
79169+
79170+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79171+ sig->gr_sport, sig->gr_dport,
79172+ gr_conn_table_size);
79173+
79174+ match = gr_conn_table[index];
79175+ while (match && !conn_match(match->sig,
79176+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79177+ sig->gr_dport)) {
79178+ last = match;
79179+ match = match->next;
79180+ }
79181+
79182+ if (match) {
79183+ if (last)
79184+ last->next = match->next;
79185+ else
79186+ gr_conn_table[index] = NULL;
79187+ kfree(match);
79188+ }
79189+
79190+ return;
79191+}
79192+
79193+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79194+ __u16 sport, __u16 dport)
79195+{
79196+ struct conn_table_entry *match;
79197+ unsigned int index;
79198+
79199+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79200+
79201+ match = gr_conn_table[index];
79202+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79203+ match = match->next;
79204+
79205+ if (match)
79206+ return match->sig;
79207+ else
79208+ return NULL;
79209+}
79210+
79211+#endif
79212+
79213+void gr_update_task_in_ip_table(const struct inet_sock *inet)
79214+{
79215+#ifdef CONFIG_GRKERNSEC
79216+ struct signal_struct *sig = current->signal;
79217+ struct conn_table_entry *newent;
79218+
79219+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79220+ if (newent == NULL)
79221+ return;
79222+ /* no bh lock needed since we are called with bh disabled */
79223+ spin_lock(&gr_conn_table_lock);
79224+ gr_del_task_from_ip_table_nolock(sig);
79225+ sig->gr_saddr = inet->inet_rcv_saddr;
79226+ sig->gr_daddr = inet->inet_daddr;
79227+ sig->gr_sport = inet->inet_sport;
79228+ sig->gr_dport = inet->inet_dport;
79229+ gr_add_to_task_ip_table_nolock(sig, newent);
79230+ spin_unlock(&gr_conn_table_lock);
79231+#endif
79232+ return;
79233+}
79234+
79235+void gr_del_task_from_ip_table(struct task_struct *task)
79236+{
79237+#ifdef CONFIG_GRKERNSEC
79238+ spin_lock_bh(&gr_conn_table_lock);
79239+ gr_del_task_from_ip_table_nolock(task->signal);
79240+ spin_unlock_bh(&gr_conn_table_lock);
79241+#endif
79242+ return;
79243+}
79244+
79245+void
79246+gr_attach_curr_ip(const struct sock *sk)
79247+{
79248+#ifdef CONFIG_GRKERNSEC
79249+ struct signal_struct *p, *set;
79250+ const struct inet_sock *inet = inet_sk(sk);
79251+
79252+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79253+ return;
79254+
79255+ set = current->signal;
79256+
79257+ spin_lock_bh(&gr_conn_table_lock);
79258+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79259+ inet->inet_dport, inet->inet_sport);
79260+ if (unlikely(p != NULL)) {
79261+ set->curr_ip = p->curr_ip;
79262+ set->used_accept = 1;
79263+ gr_del_task_from_ip_table_nolock(p);
79264+ spin_unlock_bh(&gr_conn_table_lock);
79265+ return;
79266+ }
79267+ spin_unlock_bh(&gr_conn_table_lock);
79268+
79269+ set->curr_ip = inet->inet_daddr;
79270+ set->used_accept = 1;
79271+#endif
79272+ return;
79273+}
79274+
79275+int
79276+gr_handle_sock_all(const int family, const int type, const int protocol)
79277+{
79278+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79279+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79280+ (family != AF_UNIX)) {
79281+ if (family == AF_INET)
79282+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79283+ else
79284+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79285+ return -EACCES;
79286+ }
79287+#endif
79288+ return 0;
79289+}
79290+
79291+int
79292+gr_handle_sock_server(const struct sockaddr *sck)
79293+{
79294+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79295+ if (grsec_enable_socket_server &&
79296+ in_group_p(grsec_socket_server_gid) &&
79297+ sck && (sck->sa_family != AF_UNIX) &&
79298+ (sck->sa_family != AF_LOCAL)) {
79299+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79300+ return -EACCES;
79301+ }
79302+#endif
79303+ return 0;
79304+}
79305+
79306+int
79307+gr_handle_sock_server_other(const struct sock *sck)
79308+{
79309+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79310+ if (grsec_enable_socket_server &&
79311+ in_group_p(grsec_socket_server_gid) &&
79312+ sck && (sck->sk_family != AF_UNIX) &&
79313+ (sck->sk_family != AF_LOCAL)) {
79314+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79315+ return -EACCES;
79316+ }
79317+#endif
79318+ return 0;
79319+}
79320+
79321+int
79322+gr_handle_sock_client(const struct sockaddr *sck)
79323+{
79324+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79325+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79326+ sck && (sck->sa_family != AF_UNIX) &&
79327+ (sck->sa_family != AF_LOCAL)) {
79328+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79329+ return -EACCES;
79330+ }
79331+#endif
79332+ return 0;
79333+}
79334diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79335new file mode 100644
79336index 0000000..8159888
79337--- /dev/null
79338+++ b/grsecurity/grsec_sysctl.c
79339@@ -0,0 +1,479 @@
79340+#include <linux/kernel.h>
79341+#include <linux/sched.h>
79342+#include <linux/sysctl.h>
79343+#include <linux/grsecurity.h>
79344+#include <linux/grinternal.h>
79345+
79346+int
79347+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79348+{
79349+#ifdef CONFIG_GRKERNSEC_SYSCTL
79350+ if (dirname == NULL || name == NULL)
79351+ return 0;
79352+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79353+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79354+ return -EACCES;
79355+ }
79356+#endif
79357+ return 0;
79358+}
79359+
79360+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79361+static int __maybe_unused __read_only one = 1;
79362+#endif
79363+
79364+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79365+ defined(CONFIG_GRKERNSEC_DENYUSB)
79366+struct ctl_table grsecurity_table[] = {
79367+#ifdef CONFIG_GRKERNSEC_SYSCTL
79368+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79369+#ifdef CONFIG_GRKERNSEC_IO
79370+ {
79371+ .procname = "disable_priv_io",
79372+ .data = &grsec_disable_privio,
79373+ .maxlen = sizeof(int),
79374+ .mode = 0600,
79375+ .proc_handler = &proc_dointvec,
79376+ },
79377+#endif
79378+#endif
79379+#ifdef CONFIG_GRKERNSEC_LINK
79380+ {
79381+ .procname = "linking_restrictions",
79382+ .data = &grsec_enable_link,
79383+ .maxlen = sizeof(int),
79384+ .mode = 0600,
79385+ .proc_handler = &proc_dointvec,
79386+ },
79387+#endif
79388+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79389+ {
79390+ .procname = "enforce_symlinksifowner",
79391+ .data = &grsec_enable_symlinkown,
79392+ .maxlen = sizeof(int),
79393+ .mode = 0600,
79394+ .proc_handler = &proc_dointvec,
79395+ },
79396+ {
79397+ .procname = "symlinkown_gid",
79398+ .data = &grsec_symlinkown_gid,
79399+ .maxlen = sizeof(int),
79400+ .mode = 0600,
79401+ .proc_handler = &proc_dointvec,
79402+ },
79403+#endif
79404+#ifdef CONFIG_GRKERNSEC_BRUTE
79405+ {
79406+ .procname = "deter_bruteforce",
79407+ .data = &grsec_enable_brute,
79408+ .maxlen = sizeof(int),
79409+ .mode = 0600,
79410+ .proc_handler = &proc_dointvec,
79411+ },
79412+#endif
79413+#ifdef CONFIG_GRKERNSEC_FIFO
79414+ {
79415+ .procname = "fifo_restrictions",
79416+ .data = &grsec_enable_fifo,
79417+ .maxlen = sizeof(int),
79418+ .mode = 0600,
79419+ .proc_handler = &proc_dointvec,
79420+ },
79421+#endif
79422+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79423+ {
79424+ .procname = "ptrace_readexec",
79425+ .data = &grsec_enable_ptrace_readexec,
79426+ .maxlen = sizeof(int),
79427+ .mode = 0600,
79428+ .proc_handler = &proc_dointvec,
79429+ },
79430+#endif
79431+#ifdef CONFIG_GRKERNSEC_SETXID
79432+ {
79433+ .procname = "consistent_setxid",
79434+ .data = &grsec_enable_setxid,
79435+ .maxlen = sizeof(int),
79436+ .mode = 0600,
79437+ .proc_handler = &proc_dointvec,
79438+ },
79439+#endif
79440+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79441+ {
79442+ .procname = "ip_blackhole",
79443+ .data = &grsec_enable_blackhole,
79444+ .maxlen = sizeof(int),
79445+ .mode = 0600,
79446+ .proc_handler = &proc_dointvec,
79447+ },
79448+ {
79449+ .procname = "lastack_retries",
79450+ .data = &grsec_lastack_retries,
79451+ .maxlen = sizeof(int),
79452+ .mode = 0600,
79453+ .proc_handler = &proc_dointvec,
79454+ },
79455+#endif
79456+#ifdef CONFIG_GRKERNSEC_EXECLOG
79457+ {
79458+ .procname = "exec_logging",
79459+ .data = &grsec_enable_execlog,
79460+ .maxlen = sizeof(int),
79461+ .mode = 0600,
79462+ .proc_handler = &proc_dointvec,
79463+ },
79464+#endif
79465+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79466+ {
79467+ .procname = "rwxmap_logging",
79468+ .data = &grsec_enable_log_rwxmaps,
79469+ .maxlen = sizeof(int),
79470+ .mode = 0600,
79471+ .proc_handler = &proc_dointvec,
79472+ },
79473+#endif
79474+#ifdef CONFIG_GRKERNSEC_SIGNAL
79475+ {
79476+ .procname = "signal_logging",
79477+ .data = &grsec_enable_signal,
79478+ .maxlen = sizeof(int),
79479+ .mode = 0600,
79480+ .proc_handler = &proc_dointvec,
79481+ },
79482+#endif
79483+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79484+ {
79485+ .procname = "forkfail_logging",
79486+ .data = &grsec_enable_forkfail,
79487+ .maxlen = sizeof(int),
79488+ .mode = 0600,
79489+ .proc_handler = &proc_dointvec,
79490+ },
79491+#endif
79492+#ifdef CONFIG_GRKERNSEC_TIME
79493+ {
79494+ .procname = "timechange_logging",
79495+ .data = &grsec_enable_time,
79496+ .maxlen = sizeof(int),
79497+ .mode = 0600,
79498+ .proc_handler = &proc_dointvec,
79499+ },
79500+#endif
79501+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79502+ {
79503+ .procname = "chroot_deny_shmat",
79504+ .data = &grsec_enable_chroot_shmat,
79505+ .maxlen = sizeof(int),
79506+ .mode = 0600,
79507+ .proc_handler = &proc_dointvec,
79508+ },
79509+#endif
79510+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79511+ {
79512+ .procname = "chroot_deny_unix",
79513+ .data = &grsec_enable_chroot_unix,
79514+ .maxlen = sizeof(int),
79515+ .mode = 0600,
79516+ .proc_handler = &proc_dointvec,
79517+ },
79518+#endif
79519+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79520+ {
79521+ .procname = "chroot_deny_mount",
79522+ .data = &grsec_enable_chroot_mount,
79523+ .maxlen = sizeof(int),
79524+ .mode = 0600,
79525+ .proc_handler = &proc_dointvec,
79526+ },
79527+#endif
79528+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79529+ {
79530+ .procname = "chroot_deny_fchdir",
79531+ .data = &grsec_enable_chroot_fchdir,
79532+ .maxlen = sizeof(int),
79533+ .mode = 0600,
79534+ .proc_handler = &proc_dointvec,
79535+ },
79536+#endif
79537+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79538+ {
79539+ .procname = "chroot_deny_chroot",
79540+ .data = &grsec_enable_chroot_double,
79541+ .maxlen = sizeof(int),
79542+ .mode = 0600,
79543+ .proc_handler = &proc_dointvec,
79544+ },
79545+#endif
79546+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79547+ {
79548+ .procname = "chroot_deny_pivot",
79549+ .data = &grsec_enable_chroot_pivot,
79550+ .maxlen = sizeof(int),
79551+ .mode = 0600,
79552+ .proc_handler = &proc_dointvec,
79553+ },
79554+#endif
79555+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79556+ {
79557+ .procname = "chroot_enforce_chdir",
79558+ .data = &grsec_enable_chroot_chdir,
79559+ .maxlen = sizeof(int),
79560+ .mode = 0600,
79561+ .proc_handler = &proc_dointvec,
79562+ },
79563+#endif
79564+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79565+ {
79566+ .procname = "chroot_deny_chmod",
79567+ .data = &grsec_enable_chroot_chmod,
79568+ .maxlen = sizeof(int),
79569+ .mode = 0600,
79570+ .proc_handler = &proc_dointvec,
79571+ },
79572+#endif
79573+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79574+ {
79575+ .procname = "chroot_deny_mknod",
79576+ .data = &grsec_enable_chroot_mknod,
79577+ .maxlen = sizeof(int),
79578+ .mode = 0600,
79579+ .proc_handler = &proc_dointvec,
79580+ },
79581+#endif
79582+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79583+ {
79584+ .procname = "chroot_restrict_nice",
79585+ .data = &grsec_enable_chroot_nice,
79586+ .maxlen = sizeof(int),
79587+ .mode = 0600,
79588+ .proc_handler = &proc_dointvec,
79589+ },
79590+#endif
79591+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79592+ {
79593+ .procname = "chroot_execlog",
79594+ .data = &grsec_enable_chroot_execlog,
79595+ .maxlen = sizeof(int),
79596+ .mode = 0600,
79597+ .proc_handler = &proc_dointvec,
79598+ },
79599+#endif
79600+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79601+ {
79602+ .procname = "chroot_caps",
79603+ .data = &grsec_enable_chroot_caps,
79604+ .maxlen = sizeof(int),
79605+ .mode = 0600,
79606+ .proc_handler = &proc_dointvec,
79607+ },
79608+#endif
79609+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79610+ {
79611+ .procname = "chroot_deny_sysctl",
79612+ .data = &grsec_enable_chroot_sysctl,
79613+ .maxlen = sizeof(int),
79614+ .mode = 0600,
79615+ .proc_handler = &proc_dointvec,
79616+ },
79617+#endif
79618+#ifdef CONFIG_GRKERNSEC_TPE
79619+ {
79620+ .procname = "tpe",
79621+ .data = &grsec_enable_tpe,
79622+ .maxlen = sizeof(int),
79623+ .mode = 0600,
79624+ .proc_handler = &proc_dointvec,
79625+ },
79626+ {
79627+ .procname = "tpe_gid",
79628+ .data = &grsec_tpe_gid,
79629+ .maxlen = sizeof(int),
79630+ .mode = 0600,
79631+ .proc_handler = &proc_dointvec,
79632+ },
79633+#endif
79634+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79635+ {
79636+ .procname = "tpe_invert",
79637+ .data = &grsec_enable_tpe_invert,
79638+ .maxlen = sizeof(int),
79639+ .mode = 0600,
79640+ .proc_handler = &proc_dointvec,
79641+ },
79642+#endif
79643+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79644+ {
79645+ .procname = "tpe_restrict_all",
79646+ .data = &grsec_enable_tpe_all,
79647+ .maxlen = sizeof(int),
79648+ .mode = 0600,
79649+ .proc_handler = &proc_dointvec,
79650+ },
79651+#endif
79652+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79653+ {
79654+ .procname = "socket_all",
79655+ .data = &grsec_enable_socket_all,
79656+ .maxlen = sizeof(int),
79657+ .mode = 0600,
79658+ .proc_handler = &proc_dointvec,
79659+ },
79660+ {
79661+ .procname = "socket_all_gid",
79662+ .data = &grsec_socket_all_gid,
79663+ .maxlen = sizeof(int),
79664+ .mode = 0600,
79665+ .proc_handler = &proc_dointvec,
79666+ },
79667+#endif
79668+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79669+ {
79670+ .procname = "socket_client",
79671+ .data = &grsec_enable_socket_client,
79672+ .maxlen = sizeof(int),
79673+ .mode = 0600,
79674+ .proc_handler = &proc_dointvec,
79675+ },
79676+ {
79677+ .procname = "socket_client_gid",
79678+ .data = &grsec_socket_client_gid,
79679+ .maxlen = sizeof(int),
79680+ .mode = 0600,
79681+ .proc_handler = &proc_dointvec,
79682+ },
79683+#endif
79684+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79685+ {
79686+ .procname = "socket_server",
79687+ .data = &grsec_enable_socket_server,
79688+ .maxlen = sizeof(int),
79689+ .mode = 0600,
79690+ .proc_handler = &proc_dointvec,
79691+ },
79692+ {
79693+ .procname = "socket_server_gid",
79694+ .data = &grsec_socket_server_gid,
79695+ .maxlen = sizeof(int),
79696+ .mode = 0600,
79697+ .proc_handler = &proc_dointvec,
79698+ },
79699+#endif
79700+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79701+ {
79702+ .procname = "audit_group",
79703+ .data = &grsec_enable_group,
79704+ .maxlen = sizeof(int),
79705+ .mode = 0600,
79706+ .proc_handler = &proc_dointvec,
79707+ },
79708+ {
79709+ .procname = "audit_gid",
79710+ .data = &grsec_audit_gid,
79711+ .maxlen = sizeof(int),
79712+ .mode = 0600,
79713+ .proc_handler = &proc_dointvec,
79714+ },
79715+#endif
79716+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79717+ {
79718+ .procname = "audit_chdir",
79719+ .data = &grsec_enable_chdir,
79720+ .maxlen = sizeof(int),
79721+ .mode = 0600,
79722+ .proc_handler = &proc_dointvec,
79723+ },
79724+#endif
79725+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79726+ {
79727+ .procname = "audit_mount",
79728+ .data = &grsec_enable_mount,
79729+ .maxlen = sizeof(int),
79730+ .mode = 0600,
79731+ .proc_handler = &proc_dointvec,
79732+ },
79733+#endif
79734+#ifdef CONFIG_GRKERNSEC_DMESG
79735+ {
79736+ .procname = "dmesg",
79737+ .data = &grsec_enable_dmesg,
79738+ .maxlen = sizeof(int),
79739+ .mode = 0600,
79740+ .proc_handler = &proc_dointvec,
79741+ },
79742+#endif
79743+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79744+ {
79745+ .procname = "chroot_findtask",
79746+ .data = &grsec_enable_chroot_findtask,
79747+ .maxlen = sizeof(int),
79748+ .mode = 0600,
79749+ .proc_handler = &proc_dointvec,
79750+ },
79751+#endif
79752+#ifdef CONFIG_GRKERNSEC_RESLOG
79753+ {
79754+ .procname = "resource_logging",
79755+ .data = &grsec_resource_logging,
79756+ .maxlen = sizeof(int),
79757+ .mode = 0600,
79758+ .proc_handler = &proc_dointvec,
79759+ },
79760+#endif
79761+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79762+ {
79763+ .procname = "audit_ptrace",
79764+ .data = &grsec_enable_audit_ptrace,
79765+ .maxlen = sizeof(int),
79766+ .mode = 0600,
79767+ .proc_handler = &proc_dointvec,
79768+ },
79769+#endif
79770+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79771+ {
79772+ .procname = "harden_ptrace",
79773+ .data = &grsec_enable_harden_ptrace,
79774+ .maxlen = sizeof(int),
79775+ .mode = 0600,
79776+ .proc_handler = &proc_dointvec,
79777+ },
79778+#endif
79779+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79780+ {
79781+ .procname = "harden_ipc",
79782+ .data = &grsec_enable_harden_ipc,
79783+ .maxlen = sizeof(int),
79784+ .mode = 0600,
79785+ .proc_handler = &proc_dointvec,
79786+ },
79787+#endif
79788+ {
79789+ .procname = "grsec_lock",
79790+ .data = &grsec_lock,
79791+ .maxlen = sizeof(int),
79792+ .mode = 0600,
79793+ .proc_handler = &proc_dointvec,
79794+ },
79795+#endif
79796+#ifdef CONFIG_GRKERNSEC_ROFS
79797+ {
79798+ .procname = "romount_protect",
79799+ .data = &grsec_enable_rofs,
79800+ .maxlen = sizeof(int),
79801+ .mode = 0600,
79802+ .proc_handler = &proc_dointvec_minmax,
79803+ .extra1 = &one,
79804+ .extra2 = &one,
79805+ },
79806+#endif
79807+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79808+ {
79809+ .procname = "deny_new_usb",
79810+ .data = &grsec_deny_new_usb,
79811+ .maxlen = sizeof(int),
79812+ .mode = 0600,
79813+ .proc_handler = &proc_dointvec,
79814+ },
79815+#endif
79816+ { }
79817+};
79818+#endif
79819diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79820new file mode 100644
79821index 0000000..61b514e
79822--- /dev/null
79823+++ b/grsecurity/grsec_time.c
79824@@ -0,0 +1,16 @@
79825+#include <linux/kernel.h>
79826+#include <linux/sched.h>
79827+#include <linux/grinternal.h>
79828+#include <linux/module.h>
79829+
79830+void
79831+gr_log_timechange(void)
79832+{
79833+#ifdef CONFIG_GRKERNSEC_TIME
79834+ if (grsec_enable_time)
79835+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79836+#endif
79837+ return;
79838+}
79839+
79840+EXPORT_SYMBOL_GPL(gr_log_timechange);
79841diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79842new file mode 100644
79843index 0000000..d1953de
79844--- /dev/null
79845+++ b/grsecurity/grsec_tpe.c
79846@@ -0,0 +1,78 @@
79847+#include <linux/kernel.h>
79848+#include <linux/sched.h>
79849+#include <linux/file.h>
79850+#include <linux/fs.h>
79851+#include <linux/grinternal.h>
79852+
79853+extern int gr_acl_tpe_check(void);
79854+
79855+int
79856+gr_tpe_allow(const struct file *file)
79857+{
79858+#ifdef CONFIG_GRKERNSEC
79859+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79860+ struct inode *file_inode = file->f_path.dentry->d_inode;
79861+ const struct cred *cred = current_cred();
79862+ char *msg = NULL;
79863+ char *msg2 = NULL;
79864+
79865+ // never restrict root
79866+ if (gr_is_global_root(cred->uid))
79867+ return 1;
79868+
79869+ if (grsec_enable_tpe) {
79870+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79871+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79872+ msg = "not being in trusted group";
79873+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79874+ msg = "being in untrusted group";
79875+#else
79876+ if (in_group_p(grsec_tpe_gid))
79877+ msg = "being in untrusted group";
79878+#endif
79879+ }
79880+ if (!msg && gr_acl_tpe_check())
79881+ msg = "being in untrusted role";
79882+
79883+ // not in any affected group/role
79884+ if (!msg)
79885+ goto next_check;
79886+
79887+ if (gr_is_global_nonroot(inode->i_uid))
79888+ msg2 = "file in non-root-owned directory";
79889+ else if (inode->i_mode & S_IWOTH)
79890+ msg2 = "file in world-writable directory";
79891+ else if (inode->i_mode & S_IWGRP)
79892+ msg2 = "file in group-writable directory";
79893+ else if (file_inode->i_mode & S_IWOTH)
79894+ msg2 = "file is world-writable";
79895+
79896+ if (msg && msg2) {
79897+ char fullmsg[70] = {0};
79898+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79899+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79900+ return 0;
79901+ }
79902+ msg = NULL;
79903+next_check:
79904+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79905+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79906+ return 1;
79907+
79908+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79909+ msg = "directory not owned by user";
79910+ else if (inode->i_mode & S_IWOTH)
79911+ msg = "file in world-writable directory";
79912+ else if (inode->i_mode & S_IWGRP)
79913+ msg = "file in group-writable directory";
79914+ else if (file_inode->i_mode & S_IWOTH)
79915+ msg = "file is world-writable";
79916+
79917+ if (msg) {
79918+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79919+ return 0;
79920+ }
79921+#endif
79922+#endif
79923+ return 1;
79924+}
79925diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79926new file mode 100644
79927index 0000000..ae02d8e
79928--- /dev/null
79929+++ b/grsecurity/grsec_usb.c
79930@@ -0,0 +1,15 @@
79931+#include <linux/kernel.h>
79932+#include <linux/grinternal.h>
79933+#include <linux/module.h>
79934+
79935+int gr_handle_new_usb(void)
79936+{
79937+#ifdef CONFIG_GRKERNSEC_DENYUSB
79938+ if (grsec_deny_new_usb) {
79939+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79940+ return 1;
79941+ }
79942+#endif
79943+ return 0;
79944+}
79945+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79946diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79947new file mode 100644
79948index 0000000..158b330
79949--- /dev/null
79950+++ b/grsecurity/grsum.c
79951@@ -0,0 +1,64 @@
79952+#include <linux/err.h>
79953+#include <linux/kernel.h>
79954+#include <linux/sched.h>
79955+#include <linux/mm.h>
79956+#include <linux/scatterlist.h>
79957+#include <linux/crypto.h>
79958+#include <linux/gracl.h>
79959+
79960+
79961+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79962+#error "crypto and sha256 must be built into the kernel"
79963+#endif
79964+
79965+int
79966+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79967+{
79968+ struct crypto_hash *tfm;
79969+ struct hash_desc desc;
79970+ struct scatterlist sg[2];
79971+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79972+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79973+ unsigned long *sumptr = (unsigned long *)sum;
79974+ int cryptres;
79975+ int retval = 1;
79976+ volatile int mismatched = 0;
79977+ volatile int dummy = 0;
79978+ unsigned int i;
79979+
79980+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79981+ if (IS_ERR(tfm)) {
79982+ /* should never happen, since sha256 should be built in */
79983+ memset(entry->pw, 0, GR_PW_LEN);
79984+ return 1;
79985+ }
79986+
79987+ sg_init_table(sg, 2);
79988+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79989+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79990+
79991+ desc.tfm = tfm;
79992+ desc.flags = 0;
79993+
79994+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79995+ temp_sum);
79996+
79997+ memset(entry->pw, 0, GR_PW_LEN);
79998+
79999+ if (cryptres)
80000+ goto out;
80001+
80002+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80003+ if (sumptr[i] != tmpsumptr[i])
80004+ mismatched = 1;
80005+ else
80006+ dummy = 1; // waste a cycle
80007+
80008+ if (!mismatched)
80009+ retval = dummy - 1;
80010+
80011+out:
80012+ crypto_free_hash(tfm);
80013+
80014+ return retval;
80015+}
80016diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80017index 77ff547..181834f 100644
80018--- a/include/asm-generic/4level-fixup.h
80019+++ b/include/asm-generic/4level-fixup.h
80020@@ -13,8 +13,10 @@
80021 #define pmd_alloc(mm, pud, address) \
80022 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80023 NULL: pmd_offset(pud, address))
80024+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80025
80026 #define pud_alloc(mm, pgd, address) (pgd)
80027+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80028 #define pud_offset(pgd, start) (pgd)
80029 #define pud_none(pud) 0
80030 #define pud_bad(pud) 0
80031diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80032index b7babf0..97f4c4f 100644
80033--- a/include/asm-generic/atomic-long.h
80034+++ b/include/asm-generic/atomic-long.h
80035@@ -22,6 +22,12 @@
80036
80037 typedef atomic64_t atomic_long_t;
80038
80039+#ifdef CONFIG_PAX_REFCOUNT
80040+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80041+#else
80042+typedef atomic64_t atomic_long_unchecked_t;
80043+#endif
80044+
80045 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80046
80047 static inline long atomic_long_read(atomic_long_t *l)
80048@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80049 return (long)atomic64_read(v);
80050 }
80051
80052+#ifdef CONFIG_PAX_REFCOUNT
80053+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80054+{
80055+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80056+
80057+ return (long)atomic64_read_unchecked(v);
80058+}
80059+#endif
80060+
80061 static inline void atomic_long_set(atomic_long_t *l, long i)
80062 {
80063 atomic64_t *v = (atomic64_t *)l;
80064@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80065 atomic64_set(v, i);
80066 }
80067
80068+#ifdef CONFIG_PAX_REFCOUNT
80069+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80070+{
80071+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80072+
80073+ atomic64_set_unchecked(v, i);
80074+}
80075+#endif
80076+
80077 static inline void atomic_long_inc(atomic_long_t *l)
80078 {
80079 atomic64_t *v = (atomic64_t *)l;
80080@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80081 atomic64_inc(v);
80082 }
80083
80084+#ifdef CONFIG_PAX_REFCOUNT
80085+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80086+{
80087+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80088+
80089+ atomic64_inc_unchecked(v);
80090+}
80091+#endif
80092+
80093 static inline void atomic_long_dec(atomic_long_t *l)
80094 {
80095 atomic64_t *v = (atomic64_t *)l;
80096@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80097 atomic64_dec(v);
80098 }
80099
80100+#ifdef CONFIG_PAX_REFCOUNT
80101+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80102+{
80103+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80104+
80105+ atomic64_dec_unchecked(v);
80106+}
80107+#endif
80108+
80109 static inline void atomic_long_add(long i, atomic_long_t *l)
80110 {
80111 atomic64_t *v = (atomic64_t *)l;
80112@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80113 atomic64_add(i, v);
80114 }
80115
80116+#ifdef CONFIG_PAX_REFCOUNT
80117+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80118+{
80119+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80120+
80121+ atomic64_add_unchecked(i, v);
80122+}
80123+#endif
80124+
80125 static inline void atomic_long_sub(long i, atomic_long_t *l)
80126 {
80127 atomic64_t *v = (atomic64_t *)l;
80128@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80129 atomic64_sub(i, v);
80130 }
80131
80132+#ifdef CONFIG_PAX_REFCOUNT
80133+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80134+{
80135+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80136+
80137+ atomic64_sub_unchecked(i, v);
80138+}
80139+#endif
80140+
80141 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80142 {
80143 atomic64_t *v = (atomic64_t *)l;
80144@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80145 return atomic64_add_negative(i, v);
80146 }
80147
80148-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80149+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80150 {
80151 atomic64_t *v = (atomic64_t *)l;
80152
80153 return (long)atomic64_add_return(i, v);
80154 }
80155
80156+#ifdef CONFIG_PAX_REFCOUNT
80157+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80158+{
80159+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80160+
80161+ return (long)atomic64_add_return_unchecked(i, v);
80162+}
80163+#endif
80164+
80165 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80166 {
80167 atomic64_t *v = (atomic64_t *)l;
80168@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80169 return (long)atomic64_inc_return(v);
80170 }
80171
80172+#ifdef CONFIG_PAX_REFCOUNT
80173+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80174+{
80175+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80176+
80177+ return (long)atomic64_inc_return_unchecked(v);
80178+}
80179+#endif
80180+
80181 static inline long atomic_long_dec_return(atomic_long_t *l)
80182 {
80183 atomic64_t *v = (atomic64_t *)l;
80184@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80185
80186 typedef atomic_t atomic_long_t;
80187
80188+#ifdef CONFIG_PAX_REFCOUNT
80189+typedef atomic_unchecked_t atomic_long_unchecked_t;
80190+#else
80191+typedef atomic_t atomic_long_unchecked_t;
80192+#endif
80193+
80194 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80195 static inline long atomic_long_read(atomic_long_t *l)
80196 {
80197@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80198 return (long)atomic_read(v);
80199 }
80200
80201+#ifdef CONFIG_PAX_REFCOUNT
80202+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80203+{
80204+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80205+
80206+ return (long)atomic_read_unchecked(v);
80207+}
80208+#endif
80209+
80210 static inline void atomic_long_set(atomic_long_t *l, long i)
80211 {
80212 atomic_t *v = (atomic_t *)l;
80213@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80214 atomic_set(v, i);
80215 }
80216
80217+#ifdef CONFIG_PAX_REFCOUNT
80218+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80219+{
80220+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80221+
80222+ atomic_set_unchecked(v, i);
80223+}
80224+#endif
80225+
80226 static inline void atomic_long_inc(atomic_long_t *l)
80227 {
80228 atomic_t *v = (atomic_t *)l;
80229@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80230 atomic_inc(v);
80231 }
80232
80233+#ifdef CONFIG_PAX_REFCOUNT
80234+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80235+{
80236+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80237+
80238+ atomic_inc_unchecked(v);
80239+}
80240+#endif
80241+
80242 static inline void atomic_long_dec(atomic_long_t *l)
80243 {
80244 atomic_t *v = (atomic_t *)l;
80245@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80246 atomic_dec(v);
80247 }
80248
80249+#ifdef CONFIG_PAX_REFCOUNT
80250+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80251+{
80252+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80253+
80254+ atomic_dec_unchecked(v);
80255+}
80256+#endif
80257+
80258 static inline void atomic_long_add(long i, atomic_long_t *l)
80259 {
80260 atomic_t *v = (atomic_t *)l;
80261@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80262 atomic_add(i, v);
80263 }
80264
80265+#ifdef CONFIG_PAX_REFCOUNT
80266+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80267+{
80268+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80269+
80270+ atomic_add_unchecked(i, v);
80271+}
80272+#endif
80273+
80274 static inline void atomic_long_sub(long i, atomic_long_t *l)
80275 {
80276 atomic_t *v = (atomic_t *)l;
80277@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80278 atomic_sub(i, v);
80279 }
80280
80281+#ifdef CONFIG_PAX_REFCOUNT
80282+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80283+{
80284+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80285+
80286+ atomic_sub_unchecked(i, v);
80287+}
80288+#endif
80289+
80290 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80291 {
80292 atomic_t *v = (atomic_t *)l;
80293@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
80294 return (long)atomic_add_return(i, v);
80295 }
80296
80297+#ifdef CONFIG_PAX_REFCOUNT
80298+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80299+{
80300+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80301+
80302+ return (long)atomic_add_return_unchecked(i, v);
80303+}
80304+
80305+#endif
80306+
80307 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80308 {
80309 atomic_t *v = (atomic_t *)l;
80310@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80311 return (long)atomic_inc_return(v);
80312 }
80313
80314+#ifdef CONFIG_PAX_REFCOUNT
80315+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80316+{
80317+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80318+
80319+ return (long)atomic_inc_return_unchecked(v);
80320+}
80321+#endif
80322+
80323 static inline long atomic_long_dec_return(atomic_long_t *l)
80324 {
80325 atomic_t *v = (atomic_t *)l;
80326@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80327
80328 #endif /* BITS_PER_LONG == 64 */
80329
80330+#ifdef CONFIG_PAX_REFCOUNT
80331+static inline void pax_refcount_needs_these_functions(void)
80332+{
80333+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80334+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80335+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80336+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80337+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80338+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80339+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80340+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80341+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80342+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80343+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80344+#ifdef CONFIG_X86
80345+ atomic_clear_mask_unchecked(0, NULL);
80346+ atomic_set_mask_unchecked(0, NULL);
80347+#endif
80348+
80349+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80350+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80351+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80352+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80353+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80354+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80355+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80356+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80357+}
80358+#else
80359+#define atomic_read_unchecked(v) atomic_read(v)
80360+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80361+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80362+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80363+#define atomic_inc_unchecked(v) atomic_inc(v)
80364+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80365+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80366+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80367+#define atomic_dec_unchecked(v) atomic_dec(v)
80368+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80369+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80370+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80371+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80372+
80373+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80374+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80375+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80376+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80377+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80378+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80379+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80380+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80381+#endif
80382+
80383 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
80384diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
80385index 9c79e76..9f7827d 100644
80386--- a/include/asm-generic/atomic.h
80387+++ b/include/asm-generic/atomic.h
80388@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
80389 * Atomically clears the bits set in @mask from @v
80390 */
80391 #ifndef atomic_clear_mask
80392-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
80393+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80394 {
80395 unsigned long flags;
80396
80397diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80398index b18ce4f..2ee2843 100644
80399--- a/include/asm-generic/atomic64.h
80400+++ b/include/asm-generic/atomic64.h
80401@@ -16,6 +16,8 @@ typedef struct {
80402 long long counter;
80403 } atomic64_t;
80404
80405+typedef atomic64_t atomic64_unchecked_t;
80406+
80407 #define ATOMIC64_INIT(i) { (i) }
80408
80409 extern long long atomic64_read(const atomic64_t *v);
80410@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80411 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80412 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80413
80414+#define atomic64_read_unchecked(v) atomic64_read(v)
80415+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80416+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80417+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80418+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80419+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80420+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80421+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80422+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80423+
80424 #endif /* _ASM_GENERIC_ATOMIC64_H */
80425diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80426index 1402fa8..025a736 100644
80427--- a/include/asm-generic/barrier.h
80428+++ b/include/asm-generic/barrier.h
80429@@ -74,7 +74,7 @@
80430 do { \
80431 compiletime_assert_atomic_type(*p); \
80432 smp_mb(); \
80433- ACCESS_ONCE(*p) = (v); \
80434+ ACCESS_ONCE_RW(*p) = (v); \
80435 } while (0)
80436
80437 #define smp_load_acquire(p) \
80438diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80439index a60a7cc..0fe12f2 100644
80440--- a/include/asm-generic/bitops/__fls.h
80441+++ b/include/asm-generic/bitops/__fls.h
80442@@ -9,7 +9,7 @@
80443 *
80444 * Undefined if no set bit exists, so code should check against 0 first.
80445 */
80446-static __always_inline unsigned long __fls(unsigned long word)
80447+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80448 {
80449 int num = BITS_PER_LONG - 1;
80450
80451diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80452index 0576d1f..dad6c71 100644
80453--- a/include/asm-generic/bitops/fls.h
80454+++ b/include/asm-generic/bitops/fls.h
80455@@ -9,7 +9,7 @@
80456 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80457 */
80458
80459-static __always_inline int fls(int x)
80460+static __always_inline int __intentional_overflow(-1) fls(int x)
80461 {
80462 int r = 32;
80463
80464diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80465index b097cf8..3d40e14 100644
80466--- a/include/asm-generic/bitops/fls64.h
80467+++ b/include/asm-generic/bitops/fls64.h
80468@@ -15,7 +15,7 @@
80469 * at position 64.
80470 */
80471 #if BITS_PER_LONG == 32
80472-static __always_inline int fls64(__u64 x)
80473+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80474 {
80475 __u32 h = x >> 32;
80476 if (h)
80477@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80478 return fls(x);
80479 }
80480 #elif BITS_PER_LONG == 64
80481-static __always_inline int fls64(__u64 x)
80482+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80483 {
80484 if (x == 0)
80485 return 0;
80486diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80487index 1bfcfe5..e04c5c9 100644
80488--- a/include/asm-generic/cache.h
80489+++ b/include/asm-generic/cache.h
80490@@ -6,7 +6,7 @@
80491 * cache lines need to provide their own cache.h.
80492 */
80493
80494-#define L1_CACHE_SHIFT 5
80495-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80496+#define L1_CACHE_SHIFT 5UL
80497+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80498
80499 #endif /* __ASM_GENERIC_CACHE_H */
80500diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80501index 0d68a1e..b74a761 100644
80502--- a/include/asm-generic/emergency-restart.h
80503+++ b/include/asm-generic/emergency-restart.h
80504@@ -1,7 +1,7 @@
80505 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80506 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80507
80508-static inline void machine_emergency_restart(void)
80509+static inline __noreturn void machine_emergency_restart(void)
80510 {
80511 machine_restart(NULL);
80512 }
80513diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
80514index 975e1cc..0b8a083 100644
80515--- a/include/asm-generic/io.h
80516+++ b/include/asm-generic/io.h
80517@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
80518 * These are pretty trivial
80519 */
80520 #ifndef virt_to_phys
80521-static inline unsigned long virt_to_phys(volatile void *address)
80522+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
80523 {
80524 return __pa((unsigned long)address);
80525 }
80526diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80527index 90f99c7..00ce236 100644
80528--- a/include/asm-generic/kmap_types.h
80529+++ b/include/asm-generic/kmap_types.h
80530@@ -2,9 +2,9 @@
80531 #define _ASM_GENERIC_KMAP_TYPES_H
80532
80533 #ifdef __WITH_KM_FENCE
80534-# define KM_TYPE_NR 41
80535+# define KM_TYPE_NR 42
80536 #else
80537-# define KM_TYPE_NR 20
80538+# define KM_TYPE_NR 21
80539 #endif
80540
80541 #endif
80542diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80543index 9ceb03b..62b0b8f 100644
80544--- a/include/asm-generic/local.h
80545+++ b/include/asm-generic/local.h
80546@@ -23,24 +23,37 @@ typedef struct
80547 atomic_long_t a;
80548 } local_t;
80549
80550+typedef struct {
80551+ atomic_long_unchecked_t a;
80552+} local_unchecked_t;
80553+
80554 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80555
80556 #define local_read(l) atomic_long_read(&(l)->a)
80557+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80558 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80559+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80560 #define local_inc(l) atomic_long_inc(&(l)->a)
80561+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80562 #define local_dec(l) atomic_long_dec(&(l)->a)
80563+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80564 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80565+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80566 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80567+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80568
80569 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80570 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80571 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80572 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80573 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80574+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80575 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80576 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80577+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80578
80579 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80580+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80581 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80582 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80583 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80584diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80585index 725612b..9cc513a 100644
80586--- a/include/asm-generic/pgtable-nopmd.h
80587+++ b/include/asm-generic/pgtable-nopmd.h
80588@@ -1,14 +1,19 @@
80589 #ifndef _PGTABLE_NOPMD_H
80590 #define _PGTABLE_NOPMD_H
80591
80592-#ifndef __ASSEMBLY__
80593-
80594 #include <asm-generic/pgtable-nopud.h>
80595
80596-struct mm_struct;
80597-
80598 #define __PAGETABLE_PMD_FOLDED
80599
80600+#define PMD_SHIFT PUD_SHIFT
80601+#define PTRS_PER_PMD 1
80602+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80603+#define PMD_MASK (~(PMD_SIZE-1))
80604+
80605+#ifndef __ASSEMBLY__
80606+
80607+struct mm_struct;
80608+
80609 /*
80610 * Having the pmd type consist of a pud gets the size right, and allows
80611 * us to conceptually access the pud entry that this pmd is folded into
80612@@ -16,11 +21,6 @@ struct mm_struct;
80613 */
80614 typedef struct { pud_t pud; } pmd_t;
80615
80616-#define PMD_SHIFT PUD_SHIFT
80617-#define PTRS_PER_PMD 1
80618-#define PMD_SIZE (1UL << PMD_SHIFT)
80619-#define PMD_MASK (~(PMD_SIZE-1))
80620-
80621 /*
80622 * The "pud_xxx()" functions here are trivial for a folded two-level
80623 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80624diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80625index 810431d..0ec4804f 100644
80626--- a/include/asm-generic/pgtable-nopud.h
80627+++ b/include/asm-generic/pgtable-nopud.h
80628@@ -1,10 +1,15 @@
80629 #ifndef _PGTABLE_NOPUD_H
80630 #define _PGTABLE_NOPUD_H
80631
80632-#ifndef __ASSEMBLY__
80633-
80634 #define __PAGETABLE_PUD_FOLDED
80635
80636+#define PUD_SHIFT PGDIR_SHIFT
80637+#define PTRS_PER_PUD 1
80638+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80639+#define PUD_MASK (~(PUD_SIZE-1))
80640+
80641+#ifndef __ASSEMBLY__
80642+
80643 /*
80644 * Having the pud type consist of a pgd gets the size right, and allows
80645 * us to conceptually access the pgd entry that this pud is folded into
80646@@ -12,11 +17,6 @@
80647 */
80648 typedef struct { pgd_t pgd; } pud_t;
80649
80650-#define PUD_SHIFT PGDIR_SHIFT
80651-#define PTRS_PER_PUD 1
80652-#define PUD_SIZE (1UL << PUD_SHIFT)
80653-#define PUD_MASK (~(PUD_SIZE-1))
80654-
80655 /*
80656 * The "pgd_xxx()" functions here are trivial for a folded two-level
80657 * setup: the pud is never bad, and a pud always exists (as it's folded
80658@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80659 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80660
80661 #define pgd_populate(mm, pgd, pud) do { } while (0)
80662+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80663 /*
80664 * (puds are folded into pgds so this doesn't get actually called,
80665 * but the define is needed for a generic inline function.)
80666diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80667index 53b2acc..f4568e7 100644
80668--- a/include/asm-generic/pgtable.h
80669+++ b/include/asm-generic/pgtable.h
80670@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
80671 }
80672 #endif /* CONFIG_NUMA_BALANCING */
80673
80674+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80675+#ifdef CONFIG_PAX_KERNEXEC
80676+#error KERNEXEC requires pax_open_kernel
80677+#else
80678+static inline unsigned long pax_open_kernel(void) { return 0; }
80679+#endif
80680+#endif
80681+
80682+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80683+#ifdef CONFIG_PAX_KERNEXEC
80684+#error KERNEXEC requires pax_close_kernel
80685+#else
80686+static inline unsigned long pax_close_kernel(void) { return 0; }
80687+#endif
80688+#endif
80689+
80690 #endif /* CONFIG_MMU */
80691
80692 #endif /* !__ASSEMBLY__ */
80693diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80694index 72d8803..cb9749c 100644
80695--- a/include/asm-generic/uaccess.h
80696+++ b/include/asm-generic/uaccess.h
80697@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80698 return __clear_user(to, n);
80699 }
80700
80701+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80702+#ifdef CONFIG_PAX_MEMORY_UDEREF
80703+#error UDEREF requires pax_open_userland
80704+#else
80705+static inline unsigned long pax_open_userland(void) { return 0; }
80706+#endif
80707+#endif
80708+
80709+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80710+#ifdef CONFIG_PAX_MEMORY_UDEREF
80711+#error UDEREF requires pax_close_userland
80712+#else
80713+static inline unsigned long pax_close_userland(void) { return 0; }
80714+#endif
80715+#endif
80716+
80717 #endif /* __ASM_GENERIC_UACCESS_H */
80718diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80719index 5ba0360..e85c934 100644
80720--- a/include/asm-generic/vmlinux.lds.h
80721+++ b/include/asm-generic/vmlinux.lds.h
80722@@ -231,6 +231,7 @@
80723 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80724 VMLINUX_SYMBOL(__start_rodata) = .; \
80725 *(.rodata) *(.rodata.*) \
80726+ *(.data..read_only) \
80727 *(__vermagic) /* Kernel version magic */ \
80728 . = ALIGN(8); \
80729 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80730@@ -722,17 +723,18 @@
80731 * section in the linker script will go there too. @phdr should have
80732 * a leading colon.
80733 *
80734- * Note that this macros defines __per_cpu_load as an absolute symbol.
80735+ * Note that this macros defines per_cpu_load as an absolute symbol.
80736 * If there is no need to put the percpu section at a predetermined
80737 * address, use PERCPU_SECTION.
80738 */
80739 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80740- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80741- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80742+ per_cpu_load = .; \
80743+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80744 - LOAD_OFFSET) { \
80745+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80746 PERCPU_INPUT(cacheline) \
80747 } phdr \
80748- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80749+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80750
80751 /**
80752 * PERCPU_SECTION - define output section for percpu area, simple version
80753diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80754index 623a59c..1e79ab9 100644
80755--- a/include/crypto/algapi.h
80756+++ b/include/crypto/algapi.h
80757@@ -34,7 +34,7 @@ struct crypto_type {
80758 unsigned int maskclear;
80759 unsigned int maskset;
80760 unsigned int tfmsize;
80761-};
80762+} __do_const;
80763
80764 struct crypto_instance {
80765 struct crypto_alg alg;
80766diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80767index 1968907..7d9ed9f 100644
80768--- a/include/drm/drmP.h
80769+++ b/include/drm/drmP.h
80770@@ -68,6 +68,7 @@
80771 #include <linux/workqueue.h>
80772 #include <linux/poll.h>
80773 #include <asm/pgalloc.h>
80774+#include <asm/local.h>
80775 #include <drm/drm.h>
80776 #include <drm/drm_sarea.h>
80777 #include <drm/drm_vma_manager.h>
80778@@ -260,10 +261,12 @@ do { \
80779 * \param cmd command.
80780 * \param arg argument.
80781 */
80782-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80783+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80784+ struct drm_file *file_priv);
80785+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80786 struct drm_file *file_priv);
80787
80788-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80789+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80790 unsigned long arg);
80791
80792 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80793@@ -279,10 +282,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80794 struct drm_ioctl_desc {
80795 unsigned int cmd;
80796 int flags;
80797- drm_ioctl_t *func;
80798+ drm_ioctl_t func;
80799 unsigned int cmd_drv;
80800 const char *name;
80801-};
80802+} __do_const;
80803
80804 /**
80805 * Creates a driver or general drm_ioctl_desc array entry for the given
80806@@ -946,7 +949,8 @@ struct drm_info_list {
80807 int (*show)(struct seq_file*, void*); /** show callback */
80808 u32 driver_features; /**< Required driver features for this entry */
80809 void *data;
80810-};
80811+} __do_const;
80812+typedef struct drm_info_list __no_const drm_info_list_no_const;
80813
80814 /**
80815 * debugfs node structure. This structure represents a debugfs file.
80816@@ -1030,7 +1034,7 @@ struct drm_device {
80817
80818 /** \name Usage Counters */
80819 /*@{ */
80820- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80821+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80822 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80823 int buf_use; /**< Buffers in use -- cannot alloc */
80824 atomic_t buf_alloc; /**< Buffer allocation in progress */
80825diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80826index a3d75fe..6802f9c 100644
80827--- a/include/drm/drm_crtc_helper.h
80828+++ b/include/drm/drm_crtc_helper.h
80829@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
80830 struct drm_connector *connector);
80831 /* disable encoder when not in use - more explicit than dpms off */
80832 void (*disable)(struct drm_encoder *encoder);
80833-};
80834+} __no_const;
80835
80836 /**
80837 * drm_connector_helper_funcs - helper operations for connectors
80838diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80839index a70d456..6ea07cd 100644
80840--- a/include/drm/i915_pciids.h
80841+++ b/include/drm/i915_pciids.h
80842@@ -37,7 +37,7 @@
80843 */
80844 #define INTEL_VGA_DEVICE(id, info) { \
80845 0x8086, id, \
80846- ~0, ~0, \
80847+ PCI_ANY_ID, PCI_ANY_ID, \
80848 0x030000, 0xff0000, \
80849 (unsigned long) info }
80850
80851diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80852index 72dcbe8..8db58d7 100644
80853--- a/include/drm/ttm/ttm_memory.h
80854+++ b/include/drm/ttm/ttm_memory.h
80855@@ -48,7 +48,7 @@
80856
80857 struct ttm_mem_shrink {
80858 int (*do_shrink) (struct ttm_mem_shrink *);
80859-};
80860+} __no_const;
80861
80862 /**
80863 * struct ttm_mem_global - Global memory accounting structure.
80864diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80865index 49a8284..9643967 100644
80866--- a/include/drm/ttm/ttm_page_alloc.h
80867+++ b/include/drm/ttm/ttm_page_alloc.h
80868@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80869 */
80870 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80871
80872+struct device;
80873 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80874 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80875
80876diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80877index 4b840e8..155d235 100644
80878--- a/include/keys/asymmetric-subtype.h
80879+++ b/include/keys/asymmetric-subtype.h
80880@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80881 /* Verify the signature on a key of this subtype (optional) */
80882 int (*verify_signature)(const struct key *key,
80883 const struct public_key_signature *sig);
80884-};
80885+} __do_const;
80886
80887 /**
80888 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80889diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80890index c1da539..1dcec55 100644
80891--- a/include/linux/atmdev.h
80892+++ b/include/linux/atmdev.h
80893@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80894 #endif
80895
80896 struct k_atm_aal_stats {
80897-#define __HANDLE_ITEM(i) atomic_t i
80898+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80899 __AAL_STAT_ITEMS
80900 #undef __HANDLE_ITEM
80901 };
80902@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80903 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80904 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80905 struct module *owner;
80906-};
80907+} __do_const ;
80908
80909 struct atmphy_ops {
80910 int (*start)(struct atm_dev *dev);
80911diff --git a/include/linux/audit.h b/include/linux/audit.h
80912index 22cfddb..1514eef 100644
80913--- a/include/linux/audit.h
80914+++ b/include/linux/audit.h
80915@@ -86,7 +86,7 @@ extern unsigned compat_dir_class[];
80916 extern unsigned compat_chattr_class[];
80917 extern unsigned compat_signal_class[];
80918
80919-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
80920+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
80921
80922 /* audit_names->type values */
80923 #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
80924@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
80925 extern unsigned int audit_serial(void);
80926 extern int auditsc_get_stamp(struct audit_context *ctx,
80927 struct timespec *t, unsigned int *serial);
80928-extern int audit_set_loginuid(kuid_t loginuid);
80929+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80930
80931 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80932 {
80933diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80934index 61f29e5..e67c658 100644
80935--- a/include/linux/binfmts.h
80936+++ b/include/linux/binfmts.h
80937@@ -44,7 +44,7 @@ struct linux_binprm {
80938 unsigned interp_flags;
80939 unsigned interp_data;
80940 unsigned long loader, exec;
80941-};
80942+} __randomize_layout;
80943
80944 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80945 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80946@@ -73,8 +73,10 @@ struct linux_binfmt {
80947 int (*load_binary)(struct linux_binprm *);
80948 int (*load_shlib)(struct file *);
80949 int (*core_dump)(struct coredump_params *cprm);
80950+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80951+ void (*handle_mmap)(struct file *);
80952 unsigned long min_coredump; /* minimal dump size */
80953-};
80954+} __do_const __randomize_layout;
80955
80956 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80957
80958diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80959index cbc5833..8123ebc 100644
80960--- a/include/linux/bitops.h
80961+++ b/include/linux/bitops.h
80962@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80963 * @word: value to rotate
80964 * @shift: bits to roll
80965 */
80966-static inline __u32 rol32(__u32 word, unsigned int shift)
80967+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80968 {
80969 return (word << shift) | (word >> (32 - shift));
80970 }
80971@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80972 * @word: value to rotate
80973 * @shift: bits to roll
80974 */
80975-static inline __u32 ror32(__u32 word, unsigned int shift)
80976+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80977 {
80978 return (word >> shift) | (word << (32 - shift));
80979 }
80980@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80981 return (__s32)(value << shift) >> shift;
80982 }
80983
80984-static inline unsigned fls_long(unsigned long l)
80985+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80986 {
80987 if (sizeof(l) == 4)
80988 return fls(l);
80989diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80990index 518b465..11953e6 100644
80991--- a/include/linux/blkdev.h
80992+++ b/include/linux/blkdev.h
80993@@ -1627,7 +1627,7 @@ struct block_device_operations {
80994 /* this callback is with swap_lock and sometimes page table lock held */
80995 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80996 struct module *owner;
80997-};
80998+} __do_const;
80999
81000 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81001 unsigned long);
81002diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81003index afc1343..9735539 100644
81004--- a/include/linux/blktrace_api.h
81005+++ b/include/linux/blktrace_api.h
81006@@ -25,7 +25,7 @@ struct blk_trace {
81007 struct dentry *dropped_file;
81008 struct dentry *msg_file;
81009 struct list_head running_list;
81010- atomic_t dropped;
81011+ atomic_unchecked_t dropped;
81012 };
81013
81014 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81015diff --git a/include/linux/cache.h b/include/linux/cache.h
81016index 17e7e82..1d7da26 100644
81017--- a/include/linux/cache.h
81018+++ b/include/linux/cache.h
81019@@ -16,6 +16,14 @@
81020 #define __read_mostly
81021 #endif
81022
81023+#ifndef __read_only
81024+#ifdef CONFIG_PAX_KERNEXEC
81025+#error KERNEXEC requires __read_only
81026+#else
81027+#define __read_only __read_mostly
81028+#endif
81029+#endif
81030+
81031 #ifndef ____cacheline_aligned
81032 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81033 #endif
81034diff --git a/include/linux/capability.h b/include/linux/capability.h
81035index aa93e5e..985a1b0 100644
81036--- a/include/linux/capability.h
81037+++ b/include/linux/capability.h
81038@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81039 extern bool capable(int cap);
81040 extern bool ns_capable(struct user_namespace *ns, int cap);
81041 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81042+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81043 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81044+extern bool capable_nolog(int cap);
81045+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81046
81047 /* audit system wants to get cap info from files as well */
81048 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81049
81050+extern int is_privileged_binary(const struct dentry *dentry);
81051+
81052 #endif /* !_LINUX_CAPABILITY_H */
81053diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81054index 8609d57..86e4d79 100644
81055--- a/include/linux/cdrom.h
81056+++ b/include/linux/cdrom.h
81057@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81058
81059 /* driver specifications */
81060 const int capability; /* capability flags */
81061- int n_minors; /* number of active minor devices */
81062 /* handle uniform packets for scsi type devices (scsi,atapi) */
81063 int (*generic_packet) (struct cdrom_device_info *,
81064 struct packet_command *);
81065diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81066index 4ce9056..86caac6 100644
81067--- a/include/linux/cleancache.h
81068+++ b/include/linux/cleancache.h
81069@@ -31,7 +31,7 @@ struct cleancache_ops {
81070 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81071 void (*invalidate_inode)(int, struct cleancache_filekey);
81072 void (*invalidate_fs)(int);
81073-};
81074+} __no_const;
81075
81076 extern struct cleancache_ops *
81077 cleancache_register_ops(struct cleancache_ops *ops);
81078diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81079index 411dd7e..ee38878 100644
81080--- a/include/linux/clk-provider.h
81081+++ b/include/linux/clk-provider.h
81082@@ -180,6 +180,7 @@ struct clk_ops {
81083 void (*init)(struct clk_hw *hw);
81084 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81085 };
81086+typedef struct clk_ops __no_const clk_ops_no_const;
81087
81088 /**
81089 * struct clk_init_data - holds init data that's common to all clocks and is
81090diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
81091index 653f0e2..abcafaa 100644
81092--- a/include/linux/clocksource.h
81093+++ b/include/linux/clocksource.h
81094@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
81095 extern void clocksource_change_rating(struct clocksource *cs, int rating);
81096 extern void clocksource_suspend(void);
81097 extern void clocksource_resume(void);
81098-extern struct clocksource * __init __weak clocksource_default_clock(void);
81099+extern struct clocksource * __init clocksource_default_clock(void);
81100 extern void clocksource_mark_unstable(struct clocksource *cs);
81101
81102 extern u64
81103diff --git a/include/linux/compat.h b/include/linux/compat.h
81104index e649426..a74047b 100644
81105--- a/include/linux/compat.h
81106+++ b/include/linux/compat.h
81107@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81108 compat_size_t __user *len_ptr);
81109
81110 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81111-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81112+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81113 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81114 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81115 compat_ssize_t msgsz, int msgflg);
81116@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81117 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81118 compat_ulong_t addr, compat_ulong_t data);
81119 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81120- compat_long_t addr, compat_long_t data);
81121+ compat_ulong_t addr, compat_ulong_t data);
81122
81123 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81124 /*
81125diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81126index 2507fd2..55203f8 100644
81127--- a/include/linux/compiler-gcc4.h
81128+++ b/include/linux/compiler-gcc4.h
81129@@ -39,9 +39,34 @@
81130 # define __compiletime_warning(message) __attribute__((warning(message)))
81131 # define __compiletime_error(message) __attribute__((error(message)))
81132 #endif /* __CHECKER__ */
81133+
81134+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81135+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81136+#define __bos0(ptr) __bos((ptr), 0)
81137+#define __bos1(ptr) __bos((ptr), 1)
81138 #endif /* GCC_VERSION >= 40300 */
81139
81140 #if GCC_VERSION >= 40500
81141+
81142+#ifdef RANDSTRUCT_PLUGIN
81143+#define __randomize_layout __attribute__((randomize_layout))
81144+#define __no_randomize_layout __attribute__((no_randomize_layout))
81145+#endif
81146+
81147+#ifdef CONSTIFY_PLUGIN
81148+#define __no_const __attribute__((no_const))
81149+#define __do_const __attribute__((do_const))
81150+#endif
81151+
81152+#ifdef SIZE_OVERFLOW_PLUGIN
81153+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81154+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81155+#endif
81156+
81157+#ifdef LATENT_ENTROPY_PLUGIN
81158+#define __latent_entropy __attribute__((latent_entropy))
81159+#endif
81160+
81161 /*
81162 * Mark a position in code as unreachable. This can be used to
81163 * suppress control flow warnings after asm blocks that transfer
81164diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81165index d5ad7b1..3b74638 100644
81166--- a/include/linux/compiler.h
81167+++ b/include/linux/compiler.h
81168@@ -5,11 +5,14 @@
81169
81170 #ifdef __CHECKER__
81171 # define __user __attribute__((noderef, address_space(1)))
81172+# define __force_user __force __user
81173 # define __kernel __attribute__((address_space(0)))
81174+# define __force_kernel __force __kernel
81175 # define __safe __attribute__((safe))
81176 # define __force __attribute__((force))
81177 # define __nocast __attribute__((nocast))
81178 # define __iomem __attribute__((noderef, address_space(2)))
81179+# define __force_iomem __force __iomem
81180 # define __must_hold(x) __attribute__((context(x,1,1)))
81181 # define __acquires(x) __attribute__((context(x,0,1)))
81182 # define __releases(x) __attribute__((context(x,1,0)))
81183@@ -17,20 +20,37 @@
81184 # define __release(x) __context__(x,-1)
81185 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81186 # define __percpu __attribute__((noderef, address_space(3)))
81187+# define __force_percpu __force __percpu
81188 #ifdef CONFIG_SPARSE_RCU_POINTER
81189 # define __rcu __attribute__((noderef, address_space(4)))
81190+# define __force_rcu __force __rcu
81191 #else
81192 # define __rcu
81193+# define __force_rcu
81194 #endif
81195 extern void __chk_user_ptr(const volatile void __user *);
81196 extern void __chk_io_ptr(const volatile void __iomem *);
81197 #else
81198-# define __user
81199-# define __kernel
81200+# ifdef CHECKER_PLUGIN
81201+//# define __user
81202+//# define __force_user
81203+//# define __kernel
81204+//# define __force_kernel
81205+# else
81206+# ifdef STRUCTLEAK_PLUGIN
81207+# define __user __attribute__((user))
81208+# else
81209+# define __user
81210+# endif
81211+# define __force_user
81212+# define __kernel
81213+# define __force_kernel
81214+# endif
81215 # define __safe
81216 # define __force
81217 # define __nocast
81218 # define __iomem
81219+# define __force_iomem
81220 # define __chk_user_ptr(x) (void)0
81221 # define __chk_io_ptr(x) (void)0
81222 # define __builtin_warning(x, y...) (1)
81223@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
81224 # define __release(x) (void)0
81225 # define __cond_lock(x,c) (c)
81226 # define __percpu
81227+# define __force_percpu
81228 # define __rcu
81229+# define __force_rcu
81230 #endif
81231
81232 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
81233@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81234 # define __attribute_const__ /* unimplemented */
81235 #endif
81236
81237+#ifndef __randomize_layout
81238+# define __randomize_layout
81239+#endif
81240+
81241+#ifndef __no_randomize_layout
81242+# define __no_randomize_layout
81243+#endif
81244+
81245+#ifndef __no_const
81246+# define __no_const
81247+#endif
81248+
81249+#ifndef __do_const
81250+# define __do_const
81251+#endif
81252+
81253+#ifndef __size_overflow
81254+# define __size_overflow(...)
81255+#endif
81256+
81257+#ifndef __intentional_overflow
81258+# define __intentional_overflow(...)
81259+#endif
81260+
81261+#ifndef __latent_entropy
81262+# define __latent_entropy
81263+#endif
81264+
81265 /*
81266 * Tell gcc if a function is cold. The compiler will assume any path
81267 * directly leading to the call is unlikely.
81268@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81269 #define __cold
81270 #endif
81271
81272+#ifndef __alloc_size
81273+#define __alloc_size(...)
81274+#endif
81275+
81276+#ifndef __bos
81277+#define __bos(ptr, arg)
81278+#endif
81279+
81280+#ifndef __bos0
81281+#define __bos0(ptr)
81282+#endif
81283+
81284+#ifndef __bos1
81285+#define __bos1(ptr)
81286+#endif
81287+
81288 /* Simple shorthand for a section definition */
81289 #ifndef __section
81290 # define __section(S) __attribute__ ((__section__(#S)))
81291@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81292 * use is to mediate communication between process-level code and irq/NMI
81293 * handlers, all running on the same CPU.
81294 */
81295-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
81296+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
81297+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81298
81299 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81300 #ifdef CONFIG_KPROBES
81301diff --git a/include/linux/completion.h b/include/linux/completion.h
81302index 5d5aaae..0ea9b84 100644
81303--- a/include/linux/completion.h
81304+++ b/include/linux/completion.h
81305@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81306
81307 extern void wait_for_completion(struct completion *);
81308 extern void wait_for_completion_io(struct completion *);
81309-extern int wait_for_completion_interruptible(struct completion *x);
81310-extern int wait_for_completion_killable(struct completion *x);
81311+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81312+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81313 extern unsigned long wait_for_completion_timeout(struct completion *x,
81314- unsigned long timeout);
81315+ unsigned long timeout) __intentional_overflow(-1);
81316 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81317- unsigned long timeout);
81318+ unsigned long timeout) __intentional_overflow(-1);
81319 extern long wait_for_completion_interruptible_timeout(
81320- struct completion *x, unsigned long timeout);
81321+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81322 extern long wait_for_completion_killable_timeout(
81323- struct completion *x, unsigned long timeout);
81324+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81325 extern bool try_wait_for_completion(struct completion *x);
81326 extern bool completion_done(struct completion *x);
81327
81328diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81329index 34025df..d94bbbc 100644
81330--- a/include/linux/configfs.h
81331+++ b/include/linux/configfs.h
81332@@ -125,7 +125,7 @@ struct configfs_attribute {
81333 const char *ca_name;
81334 struct module *ca_owner;
81335 umode_t ca_mode;
81336-};
81337+} __do_const;
81338
81339 /*
81340 * Users often need to create attribute structures for their configurable
81341diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81342index 7d1955a..d86a3ca 100644
81343--- a/include/linux/cpufreq.h
81344+++ b/include/linux/cpufreq.h
81345@@ -203,6 +203,7 @@ struct global_attr {
81346 ssize_t (*store)(struct kobject *a, struct attribute *b,
81347 const char *c, size_t count);
81348 };
81349+typedef struct global_attr __no_const global_attr_no_const;
81350
81351 #define define_one_global_ro(_name) \
81352 static struct global_attr _name = \
81353@@ -269,7 +270,7 @@ struct cpufreq_driver {
81354 bool boost_supported;
81355 bool boost_enabled;
81356 int (*set_boost) (int state);
81357-};
81358+} __do_const;
81359
81360 /* flags */
81361 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81362diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81363index 25e0df6..952dffd 100644
81364--- a/include/linux/cpuidle.h
81365+++ b/include/linux/cpuidle.h
81366@@ -50,7 +50,8 @@ struct cpuidle_state {
81367 int index);
81368
81369 int (*enter_dead) (struct cpuidle_device *dev, int index);
81370-};
81371+} __do_const;
81372+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81373
81374 /* Idle State Flags */
81375 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
81376@@ -209,7 +210,7 @@ struct cpuidle_governor {
81377 void (*reflect) (struct cpuidle_device *dev, int index);
81378
81379 struct module *owner;
81380-};
81381+} __do_const;
81382
81383 #ifdef CONFIG_CPU_IDLE
81384 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81385diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81386index 2997af6..424ddc1 100644
81387--- a/include/linux/cpumask.h
81388+++ b/include/linux/cpumask.h
81389@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81390 }
81391
81392 /* Valid inputs for n are -1 and 0. */
81393-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81394+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81395 {
81396 return n+1;
81397 }
81398
81399-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81400+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81401 {
81402 return n+1;
81403 }
81404
81405-static inline unsigned int cpumask_next_and(int n,
81406+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81407 const struct cpumask *srcp,
81408 const struct cpumask *andp)
81409 {
81410@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81411 *
81412 * Returns >= nr_cpu_ids if no further cpus set.
81413 */
81414-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81415+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81416 {
81417 /* -1 is a legal arg here. */
81418 if (n != -1)
81419@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81420 *
81421 * Returns >= nr_cpu_ids if no further cpus unset.
81422 */
81423-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81424+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81425 {
81426 /* -1 is a legal arg here. */
81427 if (n != -1)
81428@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81429 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81430 }
81431
81432-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81433+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81434 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81435 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81436
81437diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
81438index 72ab536..3849fce 100644
81439--- a/include/linux/crash_dump.h
81440+++ b/include/linux/crash_dump.h
81441@@ -14,14 +14,13 @@
81442 extern unsigned long long elfcorehdr_addr;
81443 extern unsigned long long elfcorehdr_size;
81444
81445-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
81446- unsigned long long *size);
81447-extern void __weak elfcorehdr_free(unsigned long long addr);
81448-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
81449-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
81450-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
81451- unsigned long from, unsigned long pfn,
81452- unsigned long size, pgprot_t prot);
81453+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
81454+extern void elfcorehdr_free(unsigned long long addr);
81455+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
81456+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
81457+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
81458+ unsigned long from, unsigned long pfn,
81459+ unsigned long size, pgprot_t prot);
81460
81461 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
81462 unsigned long, int);
81463diff --git a/include/linux/cred.h b/include/linux/cred.h
81464index b2d0820..2ecafd3 100644
81465--- a/include/linux/cred.h
81466+++ b/include/linux/cred.h
81467@@ -35,7 +35,7 @@ struct group_info {
81468 int nblocks;
81469 kgid_t small_block[NGROUPS_SMALL];
81470 kgid_t *blocks[0];
81471-};
81472+} __randomize_layout;
81473
81474 /**
81475 * get_group_info - Get a reference to a group info structure
81476@@ -136,7 +136,7 @@ struct cred {
81477 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81478 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81479 struct rcu_head rcu; /* RCU deletion hook */
81480-};
81481+} __randomize_layout;
81482
81483 extern void __put_cred(struct cred *);
81484 extern void exit_creds(struct task_struct *);
81485@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81486 static inline void validate_process_creds(void)
81487 {
81488 }
81489+static inline void validate_task_creds(struct task_struct *task)
81490+{
81491+}
81492 #endif
81493
81494 /**
81495@@ -331,6 +334,7 @@ static inline void put_cred(const struct cred *_cred)
81496
81497 #define task_uid(task) (task_cred_xxx((task), uid))
81498 #define task_euid(task) (task_cred_xxx((task), euid))
81499+#define task_securebits(task) (task_cred_xxx((task), securebits))
81500
81501 #define current_cred_xxx(xxx) \
81502 ({ \
81503diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81504index d45e949..51cf5ea 100644
81505--- a/include/linux/crypto.h
81506+++ b/include/linux/crypto.h
81507@@ -373,7 +373,7 @@ struct cipher_tfm {
81508 const u8 *key, unsigned int keylen);
81509 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81510 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81511-};
81512+} __no_const;
81513
81514 struct hash_tfm {
81515 int (*init)(struct hash_desc *desc);
81516@@ -394,13 +394,13 @@ struct compress_tfm {
81517 int (*cot_decompress)(struct crypto_tfm *tfm,
81518 const u8 *src, unsigned int slen,
81519 u8 *dst, unsigned int *dlen);
81520-};
81521+} __no_const;
81522
81523 struct rng_tfm {
81524 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81525 unsigned int dlen);
81526 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81527-};
81528+} __no_const;
81529
81530 #define crt_ablkcipher crt_u.ablkcipher
81531 #define crt_aead crt_u.aead
81532diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81533index 653589e..4ef254a 100644
81534--- a/include/linux/ctype.h
81535+++ b/include/linux/ctype.h
81536@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81537 * Fast implementation of tolower() for internal usage. Do not use in your
81538 * code.
81539 */
81540-static inline char _tolower(const char c)
81541+static inline unsigned char _tolower(const unsigned char c)
81542 {
81543 return c | 0x20;
81544 }
81545diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81546index 75a227c..1456987 100644
81547--- a/include/linux/dcache.h
81548+++ b/include/linux/dcache.h
81549@@ -134,7 +134,7 @@ struct dentry {
81550 } d_u;
81551 struct list_head d_subdirs; /* our children */
81552 struct hlist_node d_alias; /* inode alias list */
81553-};
81554+} __randomize_layout;
81555
81556 /*
81557 * dentry->d_lock spinlock nesting subclasses:
81558diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81559index 7925bf0..d5143d2 100644
81560--- a/include/linux/decompress/mm.h
81561+++ b/include/linux/decompress/mm.h
81562@@ -77,7 +77,7 @@ static void free(void *where)
81563 * warnings when not needed (indeed large_malloc / large_free are not
81564 * needed by inflate */
81565
81566-#define malloc(a) kmalloc(a, GFP_KERNEL)
81567+#define malloc(a) kmalloc((a), GFP_KERNEL)
81568 #define free(a) kfree(a)
81569
81570 #define large_malloc(a) vmalloc(a)
81571diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81572index f1863dc..5c26074 100644
81573--- a/include/linux/devfreq.h
81574+++ b/include/linux/devfreq.h
81575@@ -114,7 +114,7 @@ struct devfreq_governor {
81576 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81577 int (*event_handler)(struct devfreq *devfreq,
81578 unsigned int event, void *data);
81579-};
81580+} __do_const;
81581
81582 /**
81583 * struct devfreq - Device devfreq structure
81584diff --git a/include/linux/device.h b/include/linux/device.h
81585index 43d183a..03b6ba2 100644
81586--- a/include/linux/device.h
81587+++ b/include/linux/device.h
81588@@ -310,7 +310,7 @@ struct subsys_interface {
81589 struct list_head node;
81590 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81591 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81592-};
81593+} __do_const;
81594
81595 int subsys_interface_register(struct subsys_interface *sif);
81596 void subsys_interface_unregister(struct subsys_interface *sif);
81597@@ -506,7 +506,7 @@ struct device_type {
81598 void (*release)(struct device *dev);
81599
81600 const struct dev_pm_ops *pm;
81601-};
81602+} __do_const;
81603
81604 /* interface for exporting device attributes */
81605 struct device_attribute {
81606@@ -516,11 +516,12 @@ struct device_attribute {
81607 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81608 const char *buf, size_t count);
81609 };
81610+typedef struct device_attribute __no_const device_attribute_no_const;
81611
81612 struct dev_ext_attribute {
81613 struct device_attribute attr;
81614 void *var;
81615-};
81616+} __do_const;
81617
81618 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81619 char *buf);
81620diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81621index 931b709..89b2d89 100644
81622--- a/include/linux/dma-mapping.h
81623+++ b/include/linux/dma-mapping.h
81624@@ -60,7 +60,7 @@ struct dma_map_ops {
81625 u64 (*get_required_mask)(struct device *dev);
81626 #endif
81627 int is_phys;
81628-};
81629+} __do_const;
81630
81631 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81632
81633diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81634index 1f9e642..39e4263 100644
81635--- a/include/linux/dmaengine.h
81636+++ b/include/linux/dmaengine.h
81637@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
81638 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81639 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81640
81641-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81642+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81643 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81644-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81645+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81646 struct dma_pinned_list *pinned_list, struct page *page,
81647 unsigned int offset, size_t len);
81648
81649diff --git a/include/linux/efi.h b/include/linux/efi.h
81650index 45cb4ff..c9b4912 100644
81651--- a/include/linux/efi.h
81652+++ b/include/linux/efi.h
81653@@ -1036,6 +1036,7 @@ struct efivar_operations {
81654 efi_set_variable_t *set_variable;
81655 efi_query_variable_store_t *query_variable_store;
81656 };
81657+typedef struct efivar_operations __no_const efivar_operations_no_const;
81658
81659 struct efivars {
81660 /*
81661diff --git a/include/linux/elf.h b/include/linux/elf.h
81662index 67a5fa7..b817372 100644
81663--- a/include/linux/elf.h
81664+++ b/include/linux/elf.h
81665@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
81666 #define elf_note elf32_note
81667 #define elf_addr_t Elf32_Off
81668 #define Elf_Half Elf32_Half
81669+#define elf_dyn Elf32_Dyn
81670
81671 #else
81672
81673@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
81674 #define elf_note elf64_note
81675 #define elf_addr_t Elf64_Off
81676 #define Elf_Half Elf64_Half
81677+#define elf_dyn Elf64_Dyn
81678
81679 #endif
81680
81681diff --git a/include/linux/err.h b/include/linux/err.h
81682index a729120..6ede2c9 100644
81683--- a/include/linux/err.h
81684+++ b/include/linux/err.h
81685@@ -20,12 +20,12 @@
81686
81687 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81688
81689-static inline void * __must_check ERR_PTR(long error)
81690+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81691 {
81692 return (void *) error;
81693 }
81694
81695-static inline long __must_check PTR_ERR(__force const void *ptr)
81696+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81697 {
81698 return (long) ptr;
81699 }
81700diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81701index 36f49c4..a2a1f4c 100644
81702--- a/include/linux/extcon.h
81703+++ b/include/linux/extcon.h
81704@@ -135,7 +135,7 @@ struct extcon_dev {
81705 /* /sys/class/extcon/.../mutually_exclusive/... */
81706 struct attribute_group attr_g_muex;
81707 struct attribute **attrs_muex;
81708- struct device_attribute *d_attrs_muex;
81709+ device_attribute_no_const *d_attrs_muex;
81710 };
81711
81712 /**
81713diff --git a/include/linux/fb.h b/include/linux/fb.h
81714index 09bb7a1..d98870a 100644
81715--- a/include/linux/fb.h
81716+++ b/include/linux/fb.h
81717@@ -305,7 +305,7 @@ struct fb_ops {
81718 /* called at KDB enter and leave time to prepare the console */
81719 int (*fb_debug_enter)(struct fb_info *info);
81720 int (*fb_debug_leave)(struct fb_info *info);
81721-};
81722+} __do_const;
81723
81724 #ifdef CONFIG_FB_TILEBLITTING
81725 #define FB_TILE_CURSOR_NONE 0
81726diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81727index 230f87b..1fd0485 100644
81728--- a/include/linux/fdtable.h
81729+++ b/include/linux/fdtable.h
81730@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81731 void put_files_struct(struct files_struct *fs);
81732 void reset_files_struct(struct files_struct *);
81733 int unshare_files(struct files_struct **);
81734-struct files_struct *dup_fd(struct files_struct *, int *);
81735+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81736 void do_close_on_exec(struct files_struct *);
81737 int iterate_fd(struct files_struct *, unsigned,
81738 int (*)(const void *, struct file *, unsigned),
81739diff --git a/include/linux/filter.h b/include/linux/filter.h
81740index a5227ab..c789945 100644
81741--- a/include/linux/filter.h
81742+++ b/include/linux/filter.h
81743@@ -9,6 +9,11 @@
81744 #include <linux/skbuff.h>
81745 #include <linux/workqueue.h>
81746 #include <uapi/linux/filter.h>
81747+#include <asm/cacheflush.h>
81748+
81749+struct sk_buff;
81750+struct sock;
81751+struct seccomp_data;
81752
81753 /* Internally used and optimized filter representation with extended
81754 * instruction set based on top of classic BPF.
81755@@ -320,20 +325,23 @@ struct sock_fprog_kern {
81756 struct sock_filter *filter;
81757 };
81758
81759-struct sk_buff;
81760-struct sock;
81761-struct seccomp_data;
81762+struct bpf_work_struct {
81763+ struct bpf_prog *prog;
81764+ struct work_struct work;
81765+};
81766
81767 struct bpf_prog {
81768+ u32 pages; /* Number of allocated pages */
81769 u32 jited:1, /* Is our filter JIT'ed? */
81770 len:31; /* Number of filter blocks */
81771 struct sock_fprog_kern *orig_prog; /* Original BPF program */
81772+ struct bpf_work_struct *work; /* Deferred free work struct */
81773 unsigned int (*bpf_func)(const struct sk_buff *skb,
81774 const struct bpf_insn *filter);
81775+ /* Instructions for interpreter */
81776 union {
81777 struct sock_filter insns[0];
81778 struct bpf_insn insnsi[0];
81779- struct work_struct work;
81780 };
81781 };
81782
81783@@ -353,6 +361,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
81784
81785 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
81786
81787+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
81788+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81789+{
81790+ set_memory_ro((unsigned long)fp, fp->pages);
81791+}
81792+
81793+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81794+{
81795+ set_memory_rw((unsigned long)fp, fp->pages);
81796+}
81797+#else
81798+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81799+{
81800+}
81801+
81802+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81803+{
81804+}
81805+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
81806+
81807 int sk_filter(struct sock *sk, struct sk_buff *skb);
81808
81809 void bpf_prog_select_runtime(struct bpf_prog *fp);
81810@@ -361,6 +389,17 @@ void bpf_prog_free(struct bpf_prog *fp);
81811 int bpf_convert_filter(struct sock_filter *prog, int len,
81812 struct bpf_insn *new_prog, int *new_len);
81813
81814+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
81815+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
81816+ gfp_t gfp_extra_flags);
81817+void __bpf_prog_free(struct bpf_prog *fp);
81818+
81819+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
81820+{
81821+ bpf_prog_unlock_ro(fp);
81822+ __bpf_prog_free(fp);
81823+}
81824+
81825 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
81826 void bpf_prog_destroy(struct bpf_prog *fp);
81827
81828@@ -450,7 +489,7 @@ static inline void bpf_jit_compile(struct bpf_prog *fp)
81829
81830 static inline void bpf_jit_free(struct bpf_prog *fp)
81831 {
81832- kfree(fp);
81833+ bpf_prog_unlock_free(fp);
81834 }
81835 #endif /* CONFIG_BPF_JIT */
81836
81837diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81838index 8293262..2b3b8bd 100644
81839--- a/include/linux/frontswap.h
81840+++ b/include/linux/frontswap.h
81841@@ -11,7 +11,7 @@ struct frontswap_ops {
81842 int (*load)(unsigned, pgoff_t, struct page *);
81843 void (*invalidate_page)(unsigned, pgoff_t);
81844 void (*invalidate_area)(unsigned);
81845-};
81846+} __no_const;
81847
81848 extern bool frontswap_enabled;
81849 extern struct frontswap_ops *
81850diff --git a/include/linux/fs.h b/include/linux/fs.h
81851index 9418772..0155807 100644
81852--- a/include/linux/fs.h
81853+++ b/include/linux/fs.h
81854@@ -401,7 +401,7 @@ struct address_space {
81855 spinlock_t private_lock; /* for use by the address_space */
81856 struct list_head private_list; /* ditto */
81857 void *private_data; /* ditto */
81858-} __attribute__((aligned(sizeof(long))));
81859+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81860 /*
81861 * On most architectures that alignment is already the case; but
81862 * must be enforced here for CRIS, to let the least significant bit
81863@@ -444,7 +444,7 @@ struct block_device {
81864 int bd_fsfreeze_count;
81865 /* Mutex for freeze */
81866 struct mutex bd_fsfreeze_mutex;
81867-};
81868+} __randomize_layout;
81869
81870 /*
81871 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81872@@ -613,7 +613,7 @@ struct inode {
81873 #endif
81874
81875 void *i_private; /* fs or device private pointer */
81876-};
81877+} __randomize_layout;
81878
81879 static inline int inode_unhashed(struct inode *inode)
81880 {
81881@@ -806,7 +806,7 @@ struct file {
81882 struct list_head f_tfile_llink;
81883 #endif /* #ifdef CONFIG_EPOLL */
81884 struct address_space *f_mapping;
81885-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81886+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81887
81888 struct file_handle {
81889 __u32 handle_bytes;
81890@@ -934,7 +934,7 @@ struct file_lock {
81891 int state; /* state of grant or error if -ve */
81892 } afs;
81893 } fl_u;
81894-};
81895+} __randomize_layout;
81896
81897 /* The following constant reflects the upper bound of the file/locking space */
81898 #ifndef OFFSET_MAX
81899@@ -1284,7 +1284,7 @@ struct super_block {
81900 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
81901 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
81902 struct rcu_head rcu;
81903-};
81904+} __randomize_layout;
81905
81906 extern struct timespec current_fs_time(struct super_block *sb);
81907
81908@@ -1510,7 +1510,8 @@ struct file_operations {
81909 long (*fallocate)(struct file *file, int mode, loff_t offset,
81910 loff_t len);
81911 int (*show_fdinfo)(struct seq_file *m, struct file *f);
81912-};
81913+} __do_const __randomize_layout;
81914+typedef struct file_operations __no_const file_operations_no_const;
81915
81916 struct inode_operations {
81917 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81918@@ -2796,4 +2797,14 @@ static inline bool dir_relax(struct inode *inode)
81919 return !IS_DEADDIR(inode);
81920 }
81921
81922+static inline bool is_sidechannel_device(const struct inode *inode)
81923+{
81924+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81925+ umode_t mode = inode->i_mode;
81926+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81927+#else
81928+ return false;
81929+#endif
81930+}
81931+
81932 #endif /* _LINUX_FS_H */
81933diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81934index 0efc3e6..fd23610 100644
81935--- a/include/linux/fs_struct.h
81936+++ b/include/linux/fs_struct.h
81937@@ -6,13 +6,13 @@
81938 #include <linux/seqlock.h>
81939
81940 struct fs_struct {
81941- int users;
81942+ atomic_t users;
81943 spinlock_t lock;
81944 seqcount_t seq;
81945 int umask;
81946 int in_exec;
81947 struct path root, pwd;
81948-};
81949+} __randomize_layout;
81950
81951 extern struct kmem_cache *fs_cachep;
81952
81953diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81954index 7714849..a4a5c7a 100644
81955--- a/include/linux/fscache-cache.h
81956+++ b/include/linux/fscache-cache.h
81957@@ -113,7 +113,7 @@ struct fscache_operation {
81958 fscache_operation_release_t release;
81959 };
81960
81961-extern atomic_t fscache_op_debug_id;
81962+extern atomic_unchecked_t fscache_op_debug_id;
81963 extern void fscache_op_work_func(struct work_struct *work);
81964
81965 extern void fscache_enqueue_operation(struct fscache_operation *);
81966@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81967 INIT_WORK(&op->work, fscache_op_work_func);
81968 atomic_set(&op->usage, 1);
81969 op->state = FSCACHE_OP_ST_INITIALISED;
81970- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81971+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81972 op->processor = processor;
81973 op->release = release;
81974 INIT_LIST_HEAD(&op->pend_link);
81975diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81976index 115bb81..e7b812b 100644
81977--- a/include/linux/fscache.h
81978+++ b/include/linux/fscache.h
81979@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81980 * - this is mandatory for any object that may have data
81981 */
81982 void (*now_uncached)(void *cookie_netfs_data);
81983-};
81984+} __do_const;
81985
81986 /*
81987 * fscache cached network filesystem type
81988diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81989index 1c804b0..1432c2b 100644
81990--- a/include/linux/fsnotify.h
81991+++ b/include/linux/fsnotify.h
81992@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
81993 struct inode *inode = file_inode(file);
81994 __u32 mask = FS_ACCESS;
81995
81996+ if (is_sidechannel_device(inode))
81997+ return;
81998+
81999 if (S_ISDIR(inode->i_mode))
82000 mask |= FS_ISDIR;
82001
82002@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
82003 struct inode *inode = file_inode(file);
82004 __u32 mask = FS_MODIFY;
82005
82006+ if (is_sidechannel_device(inode))
82007+ return;
82008+
82009 if (S_ISDIR(inode->i_mode))
82010 mask |= FS_ISDIR;
82011
82012@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82013 */
82014 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82015 {
82016- return kstrdup(name, GFP_KERNEL);
82017+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82018 }
82019
82020 /*
82021diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82022index ec274e0..e678159 100644
82023--- a/include/linux/genhd.h
82024+++ b/include/linux/genhd.h
82025@@ -194,7 +194,7 @@ struct gendisk {
82026 struct kobject *slave_dir;
82027
82028 struct timer_rand_state *random;
82029- atomic_t sync_io; /* RAID */
82030+ atomic_unchecked_t sync_io; /* RAID */
82031 struct disk_events *ev;
82032 #ifdef CONFIG_BLK_DEV_INTEGRITY
82033 struct blk_integrity *integrity;
82034@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82035 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82036
82037 /* drivers/char/random.c */
82038-extern void add_disk_randomness(struct gendisk *disk);
82039+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82040 extern void rand_initialize_disk(struct gendisk *disk);
82041
82042 static inline sector_t get_start_sect(struct block_device *bdev)
82043diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82044index c0894dd..2fbf10c 100644
82045--- a/include/linux/genl_magic_func.h
82046+++ b/include/linux/genl_magic_func.h
82047@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82048 },
82049
82050 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82051-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82052+static struct genl_ops ZZZ_genl_ops[] = {
82053 #include GENL_MAGIC_INCLUDE_FILE
82054 };
82055
82056diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82057index 5e7219d..b1ed627 100644
82058--- a/include/linux/gfp.h
82059+++ b/include/linux/gfp.h
82060@@ -34,6 +34,13 @@ struct vm_area_struct;
82061 #define ___GFP_NO_KSWAPD 0x400000u
82062 #define ___GFP_OTHER_NODE 0x800000u
82063 #define ___GFP_WRITE 0x1000000u
82064+
82065+#ifdef CONFIG_PAX_USERCOPY_SLABS
82066+#define ___GFP_USERCOPY 0x2000000u
82067+#else
82068+#define ___GFP_USERCOPY 0
82069+#endif
82070+
82071 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82072
82073 /*
82074@@ -90,6 +97,7 @@ struct vm_area_struct;
82075 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82076 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82077 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82078+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82079
82080 /*
82081 * This may seem redundant, but it's a way of annotating false positives vs.
82082@@ -97,7 +105,7 @@ struct vm_area_struct;
82083 */
82084 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82085
82086-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82087+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82088 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82089
82090 /* This equals 0, but use constants in case they ever change */
82091@@ -155,6 +163,8 @@ struct vm_area_struct;
82092 /* 4GB DMA on some platforms */
82093 #define GFP_DMA32 __GFP_DMA32
82094
82095+#define GFP_USERCOPY __GFP_USERCOPY
82096+
82097 /* Convert GFP flags to their corresponding migrate type */
82098 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
82099 {
82100diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82101new file mode 100644
82102index 0000000..edb2cb6
82103--- /dev/null
82104+++ b/include/linux/gracl.h
82105@@ -0,0 +1,340 @@
82106+#ifndef GR_ACL_H
82107+#define GR_ACL_H
82108+
82109+#include <linux/grdefs.h>
82110+#include <linux/resource.h>
82111+#include <linux/capability.h>
82112+#include <linux/dcache.h>
82113+#include <asm/resource.h>
82114+
82115+/* Major status information */
82116+
82117+#define GR_VERSION "grsecurity 3.0"
82118+#define GRSECURITY_VERSION 0x3000
82119+
82120+enum {
82121+ GR_SHUTDOWN = 0,
82122+ GR_ENABLE = 1,
82123+ GR_SPROLE = 2,
82124+ GR_OLDRELOAD = 3,
82125+ GR_SEGVMOD = 4,
82126+ GR_STATUS = 5,
82127+ GR_UNSPROLE = 6,
82128+ GR_PASSSET = 7,
82129+ GR_SPROLEPAM = 8,
82130+ GR_RELOAD = 9,
82131+};
82132+
82133+/* Password setup definitions
82134+ * kernel/grhash.c */
82135+enum {
82136+ GR_PW_LEN = 128,
82137+ GR_SALT_LEN = 16,
82138+ GR_SHA_LEN = 32,
82139+};
82140+
82141+enum {
82142+ GR_SPROLE_LEN = 64,
82143+};
82144+
82145+enum {
82146+ GR_NO_GLOB = 0,
82147+ GR_REG_GLOB,
82148+ GR_CREATE_GLOB
82149+};
82150+
82151+#define GR_NLIMITS 32
82152+
82153+/* Begin Data Structures */
82154+
82155+struct sprole_pw {
82156+ unsigned char *rolename;
82157+ unsigned char salt[GR_SALT_LEN];
82158+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82159+};
82160+
82161+struct name_entry {
82162+ __u32 key;
82163+ ino_t inode;
82164+ dev_t device;
82165+ char *name;
82166+ __u16 len;
82167+ __u8 deleted;
82168+ struct name_entry *prev;
82169+ struct name_entry *next;
82170+};
82171+
82172+struct inodev_entry {
82173+ struct name_entry *nentry;
82174+ struct inodev_entry *prev;
82175+ struct inodev_entry *next;
82176+};
82177+
82178+struct acl_role_db {
82179+ struct acl_role_label **r_hash;
82180+ __u32 r_size;
82181+};
82182+
82183+struct inodev_db {
82184+ struct inodev_entry **i_hash;
82185+ __u32 i_size;
82186+};
82187+
82188+struct name_db {
82189+ struct name_entry **n_hash;
82190+ __u32 n_size;
82191+};
82192+
82193+struct crash_uid {
82194+ uid_t uid;
82195+ unsigned long expires;
82196+};
82197+
82198+struct gr_hash_struct {
82199+ void **table;
82200+ void **nametable;
82201+ void *first;
82202+ __u32 table_size;
82203+ __u32 used_size;
82204+ int type;
82205+};
82206+
82207+/* Userspace Grsecurity ACL data structures */
82208+
82209+struct acl_subject_label {
82210+ char *filename;
82211+ ino_t inode;
82212+ dev_t device;
82213+ __u32 mode;
82214+ kernel_cap_t cap_mask;
82215+ kernel_cap_t cap_lower;
82216+ kernel_cap_t cap_invert_audit;
82217+
82218+ struct rlimit res[GR_NLIMITS];
82219+ __u32 resmask;
82220+
82221+ __u8 user_trans_type;
82222+ __u8 group_trans_type;
82223+ uid_t *user_transitions;
82224+ gid_t *group_transitions;
82225+ __u16 user_trans_num;
82226+ __u16 group_trans_num;
82227+
82228+ __u32 sock_families[2];
82229+ __u32 ip_proto[8];
82230+ __u32 ip_type;
82231+ struct acl_ip_label **ips;
82232+ __u32 ip_num;
82233+ __u32 inaddr_any_override;
82234+
82235+ __u32 crashes;
82236+ unsigned long expires;
82237+
82238+ struct acl_subject_label *parent_subject;
82239+ struct gr_hash_struct *hash;
82240+ struct acl_subject_label *prev;
82241+ struct acl_subject_label *next;
82242+
82243+ struct acl_object_label **obj_hash;
82244+ __u32 obj_hash_size;
82245+ __u16 pax_flags;
82246+};
82247+
82248+struct role_allowed_ip {
82249+ __u32 addr;
82250+ __u32 netmask;
82251+
82252+ struct role_allowed_ip *prev;
82253+ struct role_allowed_ip *next;
82254+};
82255+
82256+struct role_transition {
82257+ char *rolename;
82258+
82259+ struct role_transition *prev;
82260+ struct role_transition *next;
82261+};
82262+
82263+struct acl_role_label {
82264+ char *rolename;
82265+ uid_t uidgid;
82266+ __u16 roletype;
82267+
82268+ __u16 auth_attempts;
82269+ unsigned long expires;
82270+
82271+ struct acl_subject_label *root_label;
82272+ struct gr_hash_struct *hash;
82273+
82274+ struct acl_role_label *prev;
82275+ struct acl_role_label *next;
82276+
82277+ struct role_transition *transitions;
82278+ struct role_allowed_ip *allowed_ips;
82279+ uid_t *domain_children;
82280+ __u16 domain_child_num;
82281+
82282+ umode_t umask;
82283+
82284+ struct acl_subject_label **subj_hash;
82285+ __u32 subj_hash_size;
82286+};
82287+
82288+struct user_acl_role_db {
82289+ struct acl_role_label **r_table;
82290+ __u32 num_pointers; /* Number of allocations to track */
82291+ __u32 num_roles; /* Number of roles */
82292+ __u32 num_domain_children; /* Number of domain children */
82293+ __u32 num_subjects; /* Number of subjects */
82294+ __u32 num_objects; /* Number of objects */
82295+};
82296+
82297+struct acl_object_label {
82298+ char *filename;
82299+ ino_t inode;
82300+ dev_t device;
82301+ __u32 mode;
82302+
82303+ struct acl_subject_label *nested;
82304+ struct acl_object_label *globbed;
82305+
82306+ /* next two structures not used */
82307+
82308+ struct acl_object_label *prev;
82309+ struct acl_object_label *next;
82310+};
82311+
82312+struct acl_ip_label {
82313+ char *iface;
82314+ __u32 addr;
82315+ __u32 netmask;
82316+ __u16 low, high;
82317+ __u8 mode;
82318+ __u32 type;
82319+ __u32 proto[8];
82320+
82321+ /* next two structures not used */
82322+
82323+ struct acl_ip_label *prev;
82324+ struct acl_ip_label *next;
82325+};
82326+
82327+struct gr_arg {
82328+ struct user_acl_role_db role_db;
82329+ unsigned char pw[GR_PW_LEN];
82330+ unsigned char salt[GR_SALT_LEN];
82331+ unsigned char sum[GR_SHA_LEN];
82332+ unsigned char sp_role[GR_SPROLE_LEN];
82333+ struct sprole_pw *sprole_pws;
82334+ dev_t segv_device;
82335+ ino_t segv_inode;
82336+ uid_t segv_uid;
82337+ __u16 num_sprole_pws;
82338+ __u16 mode;
82339+};
82340+
82341+struct gr_arg_wrapper {
82342+ struct gr_arg *arg;
82343+ __u32 version;
82344+ __u32 size;
82345+};
82346+
82347+struct subject_map {
82348+ struct acl_subject_label *user;
82349+ struct acl_subject_label *kernel;
82350+ struct subject_map *prev;
82351+ struct subject_map *next;
82352+};
82353+
82354+struct acl_subj_map_db {
82355+ struct subject_map **s_hash;
82356+ __u32 s_size;
82357+};
82358+
82359+struct gr_policy_state {
82360+ struct sprole_pw **acl_special_roles;
82361+ __u16 num_sprole_pws;
82362+ struct acl_role_label *kernel_role;
82363+ struct acl_role_label *role_list;
82364+ struct acl_role_label *default_role;
82365+ struct acl_role_db acl_role_set;
82366+ struct acl_subj_map_db subj_map_set;
82367+ struct name_db name_set;
82368+ struct inodev_db inodev_set;
82369+};
82370+
82371+struct gr_alloc_state {
82372+ unsigned long alloc_stack_next;
82373+ unsigned long alloc_stack_size;
82374+ void **alloc_stack;
82375+};
82376+
82377+struct gr_reload_state {
82378+ struct gr_policy_state oldpolicy;
82379+ struct gr_alloc_state oldalloc;
82380+ struct gr_policy_state newpolicy;
82381+ struct gr_alloc_state newalloc;
82382+ struct gr_policy_state *oldpolicy_ptr;
82383+ struct gr_alloc_state *oldalloc_ptr;
82384+ unsigned char oldmode;
82385+};
82386+
82387+/* End Data Structures Section */
82388+
82389+/* Hash functions generated by empirical testing by Brad Spengler
82390+ Makes good use of the low bits of the inode. Generally 0-1 times
82391+ in loop for successful match. 0-3 for unsuccessful match.
82392+ Shift/add algorithm with modulus of table size and an XOR*/
82393+
82394+static __inline__ unsigned int
82395+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
82396+{
82397+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
82398+}
82399+
82400+ static __inline__ unsigned int
82401+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
82402+{
82403+ return ((const unsigned long)userp % sz);
82404+}
82405+
82406+static __inline__ unsigned int
82407+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
82408+{
82409+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
82410+}
82411+
82412+static __inline__ unsigned int
82413+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
82414+{
82415+ return full_name_hash((const unsigned char *)name, len) % sz;
82416+}
82417+
82418+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
82419+ subj = NULL; \
82420+ iter = 0; \
82421+ while (iter < role->subj_hash_size) { \
82422+ if (subj == NULL) \
82423+ subj = role->subj_hash[iter]; \
82424+ if (subj == NULL) { \
82425+ iter++; \
82426+ continue; \
82427+ }
82428+
82429+#define FOR_EACH_SUBJECT_END(subj,iter) \
82430+ subj = subj->next; \
82431+ if (subj == NULL) \
82432+ iter++; \
82433+ }
82434+
82435+
82436+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
82437+ subj = role->hash->first; \
82438+ while (subj != NULL) {
82439+
82440+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
82441+ subj = subj->next; \
82442+ }
82443+
82444+#endif
82445+
82446diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
82447new file mode 100644
82448index 0000000..33ebd1f
82449--- /dev/null
82450+++ b/include/linux/gracl_compat.h
82451@@ -0,0 +1,156 @@
82452+#ifndef GR_ACL_COMPAT_H
82453+#define GR_ACL_COMPAT_H
82454+
82455+#include <linux/resource.h>
82456+#include <asm/resource.h>
82457+
82458+struct sprole_pw_compat {
82459+ compat_uptr_t rolename;
82460+ unsigned char salt[GR_SALT_LEN];
82461+ unsigned char sum[GR_SHA_LEN];
82462+};
82463+
82464+struct gr_hash_struct_compat {
82465+ compat_uptr_t table;
82466+ compat_uptr_t nametable;
82467+ compat_uptr_t first;
82468+ __u32 table_size;
82469+ __u32 used_size;
82470+ int type;
82471+};
82472+
82473+struct acl_subject_label_compat {
82474+ compat_uptr_t filename;
82475+ compat_ino_t inode;
82476+ __u32 device;
82477+ __u32 mode;
82478+ kernel_cap_t cap_mask;
82479+ kernel_cap_t cap_lower;
82480+ kernel_cap_t cap_invert_audit;
82481+
82482+ struct compat_rlimit res[GR_NLIMITS];
82483+ __u32 resmask;
82484+
82485+ __u8 user_trans_type;
82486+ __u8 group_trans_type;
82487+ compat_uptr_t user_transitions;
82488+ compat_uptr_t group_transitions;
82489+ __u16 user_trans_num;
82490+ __u16 group_trans_num;
82491+
82492+ __u32 sock_families[2];
82493+ __u32 ip_proto[8];
82494+ __u32 ip_type;
82495+ compat_uptr_t ips;
82496+ __u32 ip_num;
82497+ __u32 inaddr_any_override;
82498+
82499+ __u32 crashes;
82500+ compat_ulong_t expires;
82501+
82502+ compat_uptr_t parent_subject;
82503+ compat_uptr_t hash;
82504+ compat_uptr_t prev;
82505+ compat_uptr_t next;
82506+
82507+ compat_uptr_t obj_hash;
82508+ __u32 obj_hash_size;
82509+ __u16 pax_flags;
82510+};
82511+
82512+struct role_allowed_ip_compat {
82513+ __u32 addr;
82514+ __u32 netmask;
82515+
82516+ compat_uptr_t prev;
82517+ compat_uptr_t next;
82518+};
82519+
82520+struct role_transition_compat {
82521+ compat_uptr_t rolename;
82522+
82523+ compat_uptr_t prev;
82524+ compat_uptr_t next;
82525+};
82526+
82527+struct acl_role_label_compat {
82528+ compat_uptr_t rolename;
82529+ uid_t uidgid;
82530+ __u16 roletype;
82531+
82532+ __u16 auth_attempts;
82533+ compat_ulong_t expires;
82534+
82535+ compat_uptr_t root_label;
82536+ compat_uptr_t hash;
82537+
82538+ compat_uptr_t prev;
82539+ compat_uptr_t next;
82540+
82541+ compat_uptr_t transitions;
82542+ compat_uptr_t allowed_ips;
82543+ compat_uptr_t domain_children;
82544+ __u16 domain_child_num;
82545+
82546+ umode_t umask;
82547+
82548+ compat_uptr_t subj_hash;
82549+ __u32 subj_hash_size;
82550+};
82551+
82552+struct user_acl_role_db_compat {
82553+ compat_uptr_t r_table;
82554+ __u32 num_pointers;
82555+ __u32 num_roles;
82556+ __u32 num_domain_children;
82557+ __u32 num_subjects;
82558+ __u32 num_objects;
82559+};
82560+
82561+struct acl_object_label_compat {
82562+ compat_uptr_t filename;
82563+ compat_ino_t inode;
82564+ __u32 device;
82565+ __u32 mode;
82566+
82567+ compat_uptr_t nested;
82568+ compat_uptr_t globbed;
82569+
82570+ compat_uptr_t prev;
82571+ compat_uptr_t next;
82572+};
82573+
82574+struct acl_ip_label_compat {
82575+ compat_uptr_t iface;
82576+ __u32 addr;
82577+ __u32 netmask;
82578+ __u16 low, high;
82579+ __u8 mode;
82580+ __u32 type;
82581+ __u32 proto[8];
82582+
82583+ compat_uptr_t prev;
82584+ compat_uptr_t next;
82585+};
82586+
82587+struct gr_arg_compat {
82588+ struct user_acl_role_db_compat role_db;
82589+ unsigned char pw[GR_PW_LEN];
82590+ unsigned char salt[GR_SALT_LEN];
82591+ unsigned char sum[GR_SHA_LEN];
82592+ unsigned char sp_role[GR_SPROLE_LEN];
82593+ compat_uptr_t sprole_pws;
82594+ __u32 segv_device;
82595+ compat_ino_t segv_inode;
82596+ uid_t segv_uid;
82597+ __u16 num_sprole_pws;
82598+ __u16 mode;
82599+};
82600+
82601+struct gr_arg_wrapper_compat {
82602+ compat_uptr_t arg;
82603+ __u32 version;
82604+ __u32 size;
82605+};
82606+
82607+#endif
82608diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
82609new file mode 100644
82610index 0000000..323ecf2
82611--- /dev/null
82612+++ b/include/linux/gralloc.h
82613@@ -0,0 +1,9 @@
82614+#ifndef __GRALLOC_H
82615+#define __GRALLOC_H
82616+
82617+void acl_free_all(void);
82618+int acl_alloc_stack_init(unsigned long size);
82619+void *acl_alloc(unsigned long len);
82620+void *acl_alloc_num(unsigned long num, unsigned long len);
82621+
82622+#endif
82623diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
82624new file mode 100644
82625index 0000000..be66033
82626--- /dev/null
82627+++ b/include/linux/grdefs.h
82628@@ -0,0 +1,140 @@
82629+#ifndef GRDEFS_H
82630+#define GRDEFS_H
82631+
82632+/* Begin grsecurity status declarations */
82633+
82634+enum {
82635+ GR_READY = 0x01,
82636+ GR_STATUS_INIT = 0x00 // disabled state
82637+};
82638+
82639+/* Begin ACL declarations */
82640+
82641+/* Role flags */
82642+
82643+enum {
82644+ GR_ROLE_USER = 0x0001,
82645+ GR_ROLE_GROUP = 0x0002,
82646+ GR_ROLE_DEFAULT = 0x0004,
82647+ GR_ROLE_SPECIAL = 0x0008,
82648+ GR_ROLE_AUTH = 0x0010,
82649+ GR_ROLE_NOPW = 0x0020,
82650+ GR_ROLE_GOD = 0x0040,
82651+ GR_ROLE_LEARN = 0x0080,
82652+ GR_ROLE_TPE = 0x0100,
82653+ GR_ROLE_DOMAIN = 0x0200,
82654+ GR_ROLE_PAM = 0x0400,
82655+ GR_ROLE_PERSIST = 0x0800
82656+};
82657+
82658+/* ACL Subject and Object mode flags */
82659+enum {
82660+ GR_DELETED = 0x80000000
82661+};
82662+
82663+/* ACL Object-only mode flags */
82664+enum {
82665+ GR_READ = 0x00000001,
82666+ GR_APPEND = 0x00000002,
82667+ GR_WRITE = 0x00000004,
82668+ GR_EXEC = 0x00000008,
82669+ GR_FIND = 0x00000010,
82670+ GR_INHERIT = 0x00000020,
82671+ GR_SETID = 0x00000040,
82672+ GR_CREATE = 0x00000080,
82673+ GR_DELETE = 0x00000100,
82674+ GR_LINK = 0x00000200,
82675+ GR_AUDIT_READ = 0x00000400,
82676+ GR_AUDIT_APPEND = 0x00000800,
82677+ GR_AUDIT_WRITE = 0x00001000,
82678+ GR_AUDIT_EXEC = 0x00002000,
82679+ GR_AUDIT_FIND = 0x00004000,
82680+ GR_AUDIT_INHERIT= 0x00008000,
82681+ GR_AUDIT_SETID = 0x00010000,
82682+ GR_AUDIT_CREATE = 0x00020000,
82683+ GR_AUDIT_DELETE = 0x00040000,
82684+ GR_AUDIT_LINK = 0x00080000,
82685+ GR_PTRACERD = 0x00100000,
82686+ GR_NOPTRACE = 0x00200000,
82687+ GR_SUPPRESS = 0x00400000,
82688+ GR_NOLEARN = 0x00800000,
82689+ GR_INIT_TRANSFER= 0x01000000
82690+};
82691+
82692+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
82693+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
82694+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
82695+
82696+/* ACL subject-only mode flags */
82697+enum {
82698+ GR_KILL = 0x00000001,
82699+ GR_VIEW = 0x00000002,
82700+ GR_PROTECTED = 0x00000004,
82701+ GR_LEARN = 0x00000008,
82702+ GR_OVERRIDE = 0x00000010,
82703+ /* just a placeholder, this mode is only used in userspace */
82704+ GR_DUMMY = 0x00000020,
82705+ GR_PROTSHM = 0x00000040,
82706+ GR_KILLPROC = 0x00000080,
82707+ GR_KILLIPPROC = 0x00000100,
82708+ /* just a placeholder, this mode is only used in userspace */
82709+ GR_NOTROJAN = 0x00000200,
82710+ GR_PROTPROCFD = 0x00000400,
82711+ GR_PROCACCT = 0x00000800,
82712+ GR_RELAXPTRACE = 0x00001000,
82713+ //GR_NESTED = 0x00002000,
82714+ GR_INHERITLEARN = 0x00004000,
82715+ GR_PROCFIND = 0x00008000,
82716+ GR_POVERRIDE = 0x00010000,
82717+ GR_KERNELAUTH = 0x00020000,
82718+ GR_ATSECURE = 0x00040000,
82719+ GR_SHMEXEC = 0x00080000
82720+};
82721+
82722+enum {
82723+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82724+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82725+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82726+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82727+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82728+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82729+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82730+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82731+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82732+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82733+};
82734+
82735+enum {
82736+ GR_ID_USER = 0x01,
82737+ GR_ID_GROUP = 0x02,
82738+};
82739+
82740+enum {
82741+ GR_ID_ALLOW = 0x01,
82742+ GR_ID_DENY = 0x02,
82743+};
82744+
82745+#define GR_CRASH_RES 31
82746+#define GR_UIDTABLE_MAX 500
82747+
82748+/* begin resource learning section */
82749+enum {
82750+ GR_RLIM_CPU_BUMP = 60,
82751+ GR_RLIM_FSIZE_BUMP = 50000,
82752+ GR_RLIM_DATA_BUMP = 10000,
82753+ GR_RLIM_STACK_BUMP = 1000,
82754+ GR_RLIM_CORE_BUMP = 10000,
82755+ GR_RLIM_RSS_BUMP = 500000,
82756+ GR_RLIM_NPROC_BUMP = 1,
82757+ GR_RLIM_NOFILE_BUMP = 5,
82758+ GR_RLIM_MEMLOCK_BUMP = 50000,
82759+ GR_RLIM_AS_BUMP = 500000,
82760+ GR_RLIM_LOCKS_BUMP = 2,
82761+ GR_RLIM_SIGPENDING_BUMP = 5,
82762+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82763+ GR_RLIM_NICE_BUMP = 1,
82764+ GR_RLIM_RTPRIO_BUMP = 1,
82765+ GR_RLIM_RTTIME_BUMP = 1000000
82766+};
82767+
82768+#endif
82769diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82770new file mode 100644
82771index 0000000..d25522e
82772--- /dev/null
82773+++ b/include/linux/grinternal.h
82774@@ -0,0 +1,229 @@
82775+#ifndef __GRINTERNAL_H
82776+#define __GRINTERNAL_H
82777+
82778+#ifdef CONFIG_GRKERNSEC
82779+
82780+#include <linux/fs.h>
82781+#include <linux/mnt_namespace.h>
82782+#include <linux/nsproxy.h>
82783+#include <linux/gracl.h>
82784+#include <linux/grdefs.h>
82785+#include <linux/grmsg.h>
82786+
82787+void gr_add_learn_entry(const char *fmt, ...)
82788+ __attribute__ ((format (printf, 1, 2)));
82789+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82790+ const struct vfsmount *mnt);
82791+__u32 gr_check_create(const struct dentry *new_dentry,
82792+ const struct dentry *parent,
82793+ const struct vfsmount *mnt, const __u32 mode);
82794+int gr_check_protected_task(const struct task_struct *task);
82795+__u32 to_gr_audit(const __u32 reqmode);
82796+int gr_set_acls(const int type);
82797+int gr_acl_is_enabled(void);
82798+char gr_roletype_to_char(void);
82799+
82800+void gr_handle_alertkill(struct task_struct *task);
82801+char *gr_to_filename(const struct dentry *dentry,
82802+ const struct vfsmount *mnt);
82803+char *gr_to_filename1(const struct dentry *dentry,
82804+ const struct vfsmount *mnt);
82805+char *gr_to_filename2(const struct dentry *dentry,
82806+ const struct vfsmount *mnt);
82807+char *gr_to_filename3(const struct dentry *dentry,
82808+ const struct vfsmount *mnt);
82809+
82810+extern int grsec_enable_ptrace_readexec;
82811+extern int grsec_enable_harden_ptrace;
82812+extern int grsec_enable_link;
82813+extern int grsec_enable_fifo;
82814+extern int grsec_enable_execve;
82815+extern int grsec_enable_shm;
82816+extern int grsec_enable_execlog;
82817+extern int grsec_enable_signal;
82818+extern int grsec_enable_audit_ptrace;
82819+extern int grsec_enable_forkfail;
82820+extern int grsec_enable_time;
82821+extern int grsec_enable_rofs;
82822+extern int grsec_deny_new_usb;
82823+extern int grsec_enable_chroot_shmat;
82824+extern int grsec_enable_chroot_mount;
82825+extern int grsec_enable_chroot_double;
82826+extern int grsec_enable_chroot_pivot;
82827+extern int grsec_enable_chroot_chdir;
82828+extern int grsec_enable_chroot_chmod;
82829+extern int grsec_enable_chroot_mknod;
82830+extern int grsec_enable_chroot_fchdir;
82831+extern int grsec_enable_chroot_nice;
82832+extern int grsec_enable_chroot_execlog;
82833+extern int grsec_enable_chroot_caps;
82834+extern int grsec_enable_chroot_sysctl;
82835+extern int grsec_enable_chroot_unix;
82836+extern int grsec_enable_symlinkown;
82837+extern kgid_t grsec_symlinkown_gid;
82838+extern int grsec_enable_tpe;
82839+extern kgid_t grsec_tpe_gid;
82840+extern int grsec_enable_tpe_all;
82841+extern int grsec_enable_tpe_invert;
82842+extern int grsec_enable_socket_all;
82843+extern kgid_t grsec_socket_all_gid;
82844+extern int grsec_enable_socket_client;
82845+extern kgid_t grsec_socket_client_gid;
82846+extern int grsec_enable_socket_server;
82847+extern kgid_t grsec_socket_server_gid;
82848+extern kgid_t grsec_audit_gid;
82849+extern int grsec_enable_group;
82850+extern int grsec_enable_log_rwxmaps;
82851+extern int grsec_enable_mount;
82852+extern int grsec_enable_chdir;
82853+extern int grsec_resource_logging;
82854+extern int grsec_enable_blackhole;
82855+extern int grsec_lastack_retries;
82856+extern int grsec_enable_brute;
82857+extern int grsec_enable_harden_ipc;
82858+extern int grsec_lock;
82859+
82860+extern spinlock_t grsec_alert_lock;
82861+extern unsigned long grsec_alert_wtime;
82862+extern unsigned long grsec_alert_fyet;
82863+
82864+extern spinlock_t grsec_audit_lock;
82865+
82866+extern rwlock_t grsec_exec_file_lock;
82867+
82868+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82869+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82870+ (tsk)->exec_file->f_path.mnt) : "/")
82871+
82872+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82873+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82874+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82875+
82876+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82877+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82878+ (tsk)->exec_file->f_path.mnt) : "/")
82879+
82880+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82881+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82882+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82883+
82884+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82885+
82886+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82887+
82888+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82889+{
82890+ if (file1 && file2) {
82891+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82892+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82893+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82894+ return true;
82895+ }
82896+
82897+ return false;
82898+}
82899+
82900+#define GR_CHROOT_CAPS {{ \
82901+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82902+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82903+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82904+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82905+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82906+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82907+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82908+
82909+#define security_learn(normal_msg,args...) \
82910+({ \
82911+ read_lock(&grsec_exec_file_lock); \
82912+ gr_add_learn_entry(normal_msg "\n", ## args); \
82913+ read_unlock(&grsec_exec_file_lock); \
82914+})
82915+
82916+enum {
82917+ GR_DO_AUDIT,
82918+ GR_DONT_AUDIT,
82919+ /* used for non-audit messages that we shouldn't kill the task on */
82920+ GR_DONT_AUDIT_GOOD
82921+};
82922+
82923+enum {
82924+ GR_TTYSNIFF,
82925+ GR_RBAC,
82926+ GR_RBAC_STR,
82927+ GR_STR_RBAC,
82928+ GR_RBAC_MODE2,
82929+ GR_RBAC_MODE3,
82930+ GR_FILENAME,
82931+ GR_SYSCTL_HIDDEN,
82932+ GR_NOARGS,
82933+ GR_ONE_INT,
82934+ GR_ONE_INT_TWO_STR,
82935+ GR_ONE_STR,
82936+ GR_STR_INT,
82937+ GR_TWO_STR_INT,
82938+ GR_TWO_INT,
82939+ GR_TWO_U64,
82940+ GR_THREE_INT,
82941+ GR_FIVE_INT_TWO_STR,
82942+ GR_TWO_STR,
82943+ GR_THREE_STR,
82944+ GR_FOUR_STR,
82945+ GR_STR_FILENAME,
82946+ GR_FILENAME_STR,
82947+ GR_FILENAME_TWO_INT,
82948+ GR_FILENAME_TWO_INT_STR,
82949+ GR_TEXTREL,
82950+ GR_PTRACE,
82951+ GR_RESOURCE,
82952+ GR_CAP,
82953+ GR_SIG,
82954+ GR_SIG2,
82955+ GR_CRASH1,
82956+ GR_CRASH2,
82957+ GR_PSACCT,
82958+ GR_RWXMAP,
82959+ GR_RWXMAPVMA
82960+};
82961+
82962+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82963+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82964+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82965+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82966+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82967+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82968+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82969+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82970+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82971+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82972+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82973+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82974+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82975+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82976+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82977+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82978+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82979+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82980+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82981+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82982+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82983+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82984+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82985+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82986+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82987+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82988+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82989+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82990+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82991+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82992+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82993+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82994+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82995+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82996+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82997+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82998+
82999+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
83000+
83001+#endif
83002+
83003+#endif
83004diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83005new file mode 100644
83006index 0000000..b02ba9d
83007--- /dev/null
83008+++ b/include/linux/grmsg.h
83009@@ -0,0 +1,117 @@
83010+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83011+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83012+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83013+#define GR_STOPMOD_MSG "denied modification of module state by "
83014+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83015+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83016+#define GR_IOPERM_MSG "denied use of ioperm() by "
83017+#define GR_IOPL_MSG "denied use of iopl() by "
83018+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83019+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83020+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83021+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83022+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83023+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83024+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83025+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83026+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83027+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83028+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83029+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83030+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83031+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83032+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83033+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83034+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83035+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83036+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83037+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83038+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83039+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83040+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83041+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83042+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83043+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83044+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83045+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83046+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83047+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83048+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83049+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83050+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83051+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83052+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83053+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83054+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83055+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83056+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83057+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83058+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83059+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83060+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83061+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83062+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83063+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83064+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83065+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83066+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83067+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83068+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83069+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83070+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83071+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83072+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83073+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83074+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83075+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83076+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83077+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83078+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83079+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83080+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83081+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83082+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83083+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83084+#define GR_NICE_CHROOT_MSG "denied priority change by "
83085+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83086+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83087+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83088+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83089+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83090+#define GR_TIME_MSG "time set by "
83091+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83092+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83093+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83094+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83095+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83096+#define GR_BIND_MSG "denied bind() by "
83097+#define GR_CONNECT_MSG "denied connect() by "
83098+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83099+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83100+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83101+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83102+#define GR_CAP_ACL_MSG "use of %s denied for "
83103+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83104+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83105+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83106+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83107+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83108+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83109+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83110+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83111+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83112+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83113+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83114+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83115+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83116+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83117+#define GR_VM86_MSG "denied use of vm86 by "
83118+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83119+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83120+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83121+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83122+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83123+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83124+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83125+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83126+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
83127diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83128new file mode 100644
83129index 0000000..10b9635
83130--- /dev/null
83131+++ b/include/linux/grsecurity.h
83132@@ -0,0 +1,254 @@
83133+#ifndef GR_SECURITY_H
83134+#define GR_SECURITY_H
83135+#include <linux/fs.h>
83136+#include <linux/fs_struct.h>
83137+#include <linux/binfmts.h>
83138+#include <linux/gracl.h>
83139+
83140+/* notify of brain-dead configs */
83141+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83142+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83143+#endif
83144+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83145+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83146+#endif
83147+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83148+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83149+#endif
83150+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83151+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83152+#endif
83153+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83154+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83155+#endif
83156+
83157+int gr_handle_new_usb(void);
83158+
83159+void gr_handle_brute_attach(int dumpable);
83160+void gr_handle_brute_check(void);
83161+void gr_handle_kernel_exploit(void);
83162+
83163+char gr_roletype_to_char(void);
83164+
83165+int gr_proc_is_restricted(void);
83166+
83167+int gr_acl_enable_at_secure(void);
83168+
83169+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83170+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83171+
83172+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83173+
83174+void gr_del_task_from_ip_table(struct task_struct *p);
83175+
83176+int gr_pid_is_chrooted(struct task_struct *p);
83177+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83178+int gr_handle_chroot_nice(void);
83179+int gr_handle_chroot_sysctl(const int op);
83180+int gr_handle_chroot_setpriority(struct task_struct *p,
83181+ const int niceval);
83182+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83183+int gr_chroot_fhandle(void);
83184+int gr_handle_chroot_chroot(const struct dentry *dentry,
83185+ const struct vfsmount *mnt);
83186+void gr_handle_chroot_chdir(const struct path *path);
83187+int gr_handle_chroot_chmod(const struct dentry *dentry,
83188+ const struct vfsmount *mnt, const int mode);
83189+int gr_handle_chroot_mknod(const struct dentry *dentry,
83190+ const struct vfsmount *mnt, const int mode);
83191+int gr_handle_chroot_mount(const struct dentry *dentry,
83192+ const struct vfsmount *mnt,
83193+ const char *dev_name);
83194+int gr_handle_chroot_pivot(void);
83195+int gr_handle_chroot_unix(const pid_t pid);
83196+
83197+int gr_handle_rawio(const struct inode *inode);
83198+
83199+void gr_handle_ioperm(void);
83200+void gr_handle_iopl(void);
83201+void gr_handle_msr_write(void);
83202+
83203+umode_t gr_acl_umask(void);
83204+
83205+int gr_tpe_allow(const struct file *file);
83206+
83207+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83208+void gr_clear_chroot_entries(struct task_struct *task);
83209+
83210+void gr_log_forkfail(const int retval);
83211+void gr_log_timechange(void);
83212+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83213+void gr_log_chdir(const struct dentry *dentry,
83214+ const struct vfsmount *mnt);
83215+void gr_log_chroot_exec(const struct dentry *dentry,
83216+ const struct vfsmount *mnt);
83217+void gr_log_remount(const char *devname, const int retval);
83218+void gr_log_unmount(const char *devname, const int retval);
83219+void gr_log_mount(const char *from, const char *to, const int retval);
83220+void gr_log_textrel(struct vm_area_struct *vma);
83221+void gr_log_ptgnustack(struct file *file);
83222+void gr_log_rwxmmap(struct file *file);
83223+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83224+
83225+int gr_handle_follow_link(const struct inode *parent,
83226+ const struct inode *inode,
83227+ const struct dentry *dentry,
83228+ const struct vfsmount *mnt);
83229+int gr_handle_fifo(const struct dentry *dentry,
83230+ const struct vfsmount *mnt,
83231+ const struct dentry *dir, const int flag,
83232+ const int acc_mode);
83233+int gr_handle_hardlink(const struct dentry *dentry,
83234+ const struct vfsmount *mnt,
83235+ struct inode *inode,
83236+ const int mode, const struct filename *to);
83237+
83238+int gr_is_capable(const int cap);
83239+int gr_is_capable_nolog(const int cap);
83240+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
83241+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
83242+
83243+void gr_copy_label(struct task_struct *tsk);
83244+void gr_handle_crash(struct task_struct *task, const int sig);
83245+int gr_handle_signal(const struct task_struct *p, const int sig);
83246+int gr_check_crash_uid(const kuid_t uid);
83247+int gr_check_protected_task(const struct task_struct *task);
83248+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
83249+int gr_acl_handle_mmap(const struct file *file,
83250+ const unsigned long prot);
83251+int gr_acl_handle_mprotect(const struct file *file,
83252+ const unsigned long prot);
83253+int gr_check_hidden_task(const struct task_struct *tsk);
83254+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
83255+ const struct vfsmount *mnt);
83256+__u32 gr_acl_handle_utime(const struct dentry *dentry,
83257+ const struct vfsmount *mnt);
83258+__u32 gr_acl_handle_access(const struct dentry *dentry,
83259+ const struct vfsmount *mnt, const int fmode);
83260+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
83261+ const struct vfsmount *mnt, umode_t *mode);
83262+__u32 gr_acl_handle_chown(const struct dentry *dentry,
83263+ const struct vfsmount *mnt);
83264+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
83265+ const struct vfsmount *mnt);
83266+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
83267+ const struct vfsmount *mnt);
83268+int gr_handle_ptrace(struct task_struct *task, const long request);
83269+int gr_handle_proc_ptrace(struct task_struct *task);
83270+__u32 gr_acl_handle_execve(const struct dentry *dentry,
83271+ const struct vfsmount *mnt);
83272+int gr_check_crash_exec(const struct file *filp);
83273+int gr_acl_is_enabled(void);
83274+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
83275+ const kgid_t gid);
83276+int gr_set_proc_label(const struct dentry *dentry,
83277+ const struct vfsmount *mnt,
83278+ const int unsafe_flags);
83279+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
83280+ const struct vfsmount *mnt);
83281+__u32 gr_acl_handle_open(const struct dentry *dentry,
83282+ const struct vfsmount *mnt, int acc_mode);
83283+__u32 gr_acl_handle_creat(const struct dentry *dentry,
83284+ const struct dentry *p_dentry,
83285+ const struct vfsmount *p_mnt,
83286+ int open_flags, int acc_mode, const int imode);
83287+void gr_handle_create(const struct dentry *dentry,
83288+ const struct vfsmount *mnt);
83289+void gr_handle_proc_create(const struct dentry *dentry,
83290+ const struct inode *inode);
83291+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
83292+ const struct dentry *parent_dentry,
83293+ const struct vfsmount *parent_mnt,
83294+ const int mode);
83295+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
83296+ const struct dentry *parent_dentry,
83297+ const struct vfsmount *parent_mnt);
83298+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
83299+ const struct vfsmount *mnt);
83300+void gr_handle_delete(const ino_t ino, const dev_t dev);
83301+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
83302+ const struct vfsmount *mnt);
83303+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
83304+ const struct dentry *parent_dentry,
83305+ const struct vfsmount *parent_mnt,
83306+ const struct filename *from);
83307+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
83308+ const struct dentry *parent_dentry,
83309+ const struct vfsmount *parent_mnt,
83310+ const struct dentry *old_dentry,
83311+ const struct vfsmount *old_mnt, const struct filename *to);
83312+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
83313+int gr_acl_handle_rename(struct dentry *new_dentry,
83314+ struct dentry *parent_dentry,
83315+ const struct vfsmount *parent_mnt,
83316+ struct dentry *old_dentry,
83317+ struct inode *old_parent_inode,
83318+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
83319+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
83320+ struct dentry *old_dentry,
83321+ struct dentry *new_dentry,
83322+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
83323+__u32 gr_check_link(const struct dentry *new_dentry,
83324+ const struct dentry *parent_dentry,
83325+ const struct vfsmount *parent_mnt,
83326+ const struct dentry *old_dentry,
83327+ const struct vfsmount *old_mnt);
83328+int gr_acl_handle_filldir(const struct file *file, const char *name,
83329+ const unsigned int namelen, const ino_t ino);
83330+
83331+__u32 gr_acl_handle_unix(const struct dentry *dentry,
83332+ const struct vfsmount *mnt);
83333+void gr_acl_handle_exit(void);
83334+void gr_acl_handle_psacct(struct task_struct *task, const long code);
83335+int gr_acl_handle_procpidmem(const struct task_struct *task);
83336+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
83337+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
83338+void gr_audit_ptrace(struct task_struct *task);
83339+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
83340+void gr_put_exec_file(struct task_struct *task);
83341+
83342+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
83343+
83344+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83345+extern void gr_learn_resource(const struct task_struct *task, const int res,
83346+ const unsigned long wanted, const int gt);
83347+#else
83348+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83349+ const unsigned long wanted, const int gt)
83350+{
83351+}
83352+#endif
83353+
83354+#ifdef CONFIG_GRKERNSEC_RESLOG
83355+extern void gr_log_resource(const struct task_struct *task, const int res,
83356+ const unsigned long wanted, const int gt);
83357+#else
83358+static inline void gr_log_resource(const struct task_struct *task, const int res,
83359+ const unsigned long wanted, const int gt)
83360+{
83361+}
83362+#endif
83363+
83364+#ifdef CONFIG_GRKERNSEC
83365+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
83366+void gr_handle_vm86(void);
83367+void gr_handle_mem_readwrite(u64 from, u64 to);
83368+
83369+void gr_log_badprocpid(const char *entry);
83370+
83371+extern int grsec_enable_dmesg;
83372+extern int grsec_disable_privio;
83373+
83374+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83375+extern kgid_t grsec_proc_gid;
83376+#endif
83377+
83378+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83379+extern int grsec_enable_chroot_findtask;
83380+#endif
83381+#ifdef CONFIG_GRKERNSEC_SETXID
83382+extern int grsec_enable_setxid;
83383+#endif
83384+#endif
83385+
83386+#endif
83387diff --git a/include/linux/grsock.h b/include/linux/grsock.h
83388new file mode 100644
83389index 0000000..e7ffaaf
83390--- /dev/null
83391+++ b/include/linux/grsock.h
83392@@ -0,0 +1,19 @@
83393+#ifndef __GRSOCK_H
83394+#define __GRSOCK_H
83395+
83396+extern void gr_attach_curr_ip(const struct sock *sk);
83397+extern int gr_handle_sock_all(const int family, const int type,
83398+ const int protocol);
83399+extern int gr_handle_sock_server(const struct sockaddr *sck);
83400+extern int gr_handle_sock_server_other(const struct sock *sck);
83401+extern int gr_handle_sock_client(const struct sockaddr *sck);
83402+extern int gr_search_connect(struct socket * sock,
83403+ struct sockaddr_in * addr);
83404+extern int gr_search_bind(struct socket * sock,
83405+ struct sockaddr_in * addr);
83406+extern int gr_search_listen(struct socket * sock);
83407+extern int gr_search_accept(struct socket * sock);
83408+extern int gr_search_socket(const int domain, const int type,
83409+ const int protocol);
83410+
83411+#endif
83412diff --git a/include/linux/hash.h b/include/linux/hash.h
83413index d0494c3..69b7715 100644
83414--- a/include/linux/hash.h
83415+++ b/include/linux/hash.h
83416@@ -87,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr)
83417 struct fast_hash_ops {
83418 u32 (*hash)(const void *data, u32 len, u32 seed);
83419 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
83420-};
83421+} __no_const;
83422
83423 /**
83424 * arch_fast_hash - Caclulates a hash over a given buffer that can have
83425diff --git a/include/linux/highmem.h b/include/linux/highmem.h
83426index 9286a46..373f27f 100644
83427--- a/include/linux/highmem.h
83428+++ b/include/linux/highmem.h
83429@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
83430 kunmap_atomic(kaddr);
83431 }
83432
83433+static inline void sanitize_highpage(struct page *page)
83434+{
83435+ void *kaddr;
83436+ unsigned long flags;
83437+
83438+ local_irq_save(flags);
83439+ kaddr = kmap_atomic(page);
83440+ clear_page(kaddr);
83441+ kunmap_atomic(kaddr);
83442+ local_irq_restore(flags);
83443+}
83444+
83445 static inline void zero_user_segments(struct page *page,
83446 unsigned start1, unsigned end1,
83447 unsigned start2, unsigned end2)
83448diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
83449index 1c7b89a..7dda400 100644
83450--- a/include/linux/hwmon-sysfs.h
83451+++ b/include/linux/hwmon-sysfs.h
83452@@ -25,7 +25,8 @@
83453 struct sensor_device_attribute{
83454 struct device_attribute dev_attr;
83455 int index;
83456-};
83457+} __do_const;
83458+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
83459 #define to_sensor_dev_attr(_dev_attr) \
83460 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
83461
83462@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
83463 struct device_attribute dev_attr;
83464 u8 index;
83465 u8 nr;
83466-};
83467+} __do_const;
83468+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
83469 #define to_sensor_dev_attr_2(_dev_attr) \
83470 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
83471
83472diff --git a/include/linux/i2c.h b/include/linux/i2c.h
83473index b556e0a..c10a515 100644
83474--- a/include/linux/i2c.h
83475+++ b/include/linux/i2c.h
83476@@ -378,6 +378,7 @@ struct i2c_algorithm {
83477 /* To determine what the adapter supports */
83478 u32 (*functionality) (struct i2c_adapter *);
83479 };
83480+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
83481
83482 /**
83483 * struct i2c_bus_recovery_info - I2C bus recovery information
83484diff --git a/include/linux/i2o.h b/include/linux/i2o.h
83485index d23c3c2..eb63c81 100644
83486--- a/include/linux/i2o.h
83487+++ b/include/linux/i2o.h
83488@@ -565,7 +565,7 @@ struct i2o_controller {
83489 struct i2o_device *exec; /* Executive */
83490 #if BITS_PER_LONG == 64
83491 spinlock_t context_list_lock; /* lock for context_list */
83492- atomic_t context_list_counter; /* needed for unique contexts */
83493+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
83494 struct list_head context_list; /* list of context id's
83495 and pointers */
83496 #endif
83497diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
83498index aff7ad8..3942bbd 100644
83499--- a/include/linux/if_pppox.h
83500+++ b/include/linux/if_pppox.h
83501@@ -76,7 +76,7 @@ struct pppox_proto {
83502 int (*ioctl)(struct socket *sock, unsigned int cmd,
83503 unsigned long arg);
83504 struct module *owner;
83505-};
83506+} __do_const;
83507
83508 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
83509 extern void unregister_pppox_proto(int proto_num);
83510diff --git a/include/linux/init.h b/include/linux/init.h
83511index 2df8e8d..3e1280d 100644
83512--- a/include/linux/init.h
83513+++ b/include/linux/init.h
83514@@ -37,9 +37,17 @@
83515 * section.
83516 */
83517
83518+#define add_init_latent_entropy __latent_entropy
83519+
83520+#ifdef CONFIG_MEMORY_HOTPLUG
83521+#define add_meminit_latent_entropy
83522+#else
83523+#define add_meminit_latent_entropy __latent_entropy
83524+#endif
83525+
83526 /* These are for everybody (although not all archs will actually
83527 discard it in modules) */
83528-#define __init __section(.init.text) __cold notrace
83529+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
83530 #define __initdata __section(.init.data)
83531 #define __initconst __constsection(.init.rodata)
83532 #define __exitdata __section(.exit.data)
83533@@ -100,7 +108,7 @@
83534 #define __cpuexitconst
83535
83536 /* Used for MEMORY_HOTPLUG */
83537-#define __meminit __section(.meminit.text) __cold notrace
83538+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
83539 #define __meminitdata __section(.meminit.data)
83540 #define __meminitconst __constsection(.meminit.rodata)
83541 #define __memexit __section(.memexit.text) __exitused __cold notrace
83542diff --git a/include/linux/init_task.h b/include/linux/init_task.h
83543index 2bb4c4f3..e0fac69 100644
83544--- a/include/linux/init_task.h
83545+++ b/include/linux/init_task.h
83546@@ -149,6 +149,12 @@ extern struct task_group root_task_group;
83547
83548 #define INIT_TASK_COMM "swapper"
83549
83550+#ifdef CONFIG_X86
83551+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
83552+#else
83553+#define INIT_TASK_THREAD_INFO
83554+#endif
83555+
83556 #ifdef CONFIG_RT_MUTEXES
83557 # define INIT_RT_MUTEXES(tsk) \
83558 .pi_waiters = RB_ROOT, \
83559@@ -196,6 +202,7 @@ extern struct task_group root_task_group;
83560 RCU_POINTER_INITIALIZER(cred, &init_cred), \
83561 .comm = INIT_TASK_COMM, \
83562 .thread = INIT_THREAD, \
83563+ INIT_TASK_THREAD_INFO \
83564 .fs = &init_fs, \
83565 .files = &init_files, \
83566 .signal = &init_signals, \
83567diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
83568index 698ad05..8601bb7 100644
83569--- a/include/linux/interrupt.h
83570+++ b/include/linux/interrupt.h
83571@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
83572
83573 struct softirq_action
83574 {
83575- void (*action)(struct softirq_action *);
83576-};
83577+ void (*action)(void);
83578+} __no_const;
83579
83580 asmlinkage void do_softirq(void);
83581 asmlinkage void __do_softirq(void);
83582@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
83583 }
83584 #endif
83585
83586-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
83587+extern void open_softirq(int nr, void (*action)(void));
83588 extern void softirq_init(void);
83589 extern void __raise_softirq_irqoff(unsigned int nr);
83590
83591diff --git a/include/linux/iommu.h b/include/linux/iommu.h
83592index 20f9a52..63ee2e3 100644
83593--- a/include/linux/iommu.h
83594+++ b/include/linux/iommu.h
83595@@ -131,7 +131,7 @@ struct iommu_ops {
83596 u32 (*domain_get_windows)(struct iommu_domain *domain);
83597
83598 unsigned long pgsize_bitmap;
83599-};
83600+} __do_const;
83601
83602 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
83603 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
83604diff --git a/include/linux/ioport.h b/include/linux/ioport.h
83605index 142ec54..873e033 100644
83606--- a/include/linux/ioport.h
83607+++ b/include/linux/ioport.h
83608@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
83609 int adjust_resource(struct resource *res, resource_size_t start,
83610 resource_size_t size);
83611 resource_size_t resource_alignment(struct resource *res);
83612-static inline resource_size_t resource_size(const struct resource *res)
83613+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
83614 {
83615 return res->end - res->start + 1;
83616 }
83617diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
83618index 35e7eca..6afb7ad 100644
83619--- a/include/linux/ipc_namespace.h
83620+++ b/include/linux/ipc_namespace.h
83621@@ -69,7 +69,7 @@ struct ipc_namespace {
83622 struct user_namespace *user_ns;
83623
83624 unsigned int proc_inum;
83625-};
83626+} __randomize_layout;
83627
83628 extern struct ipc_namespace init_ipc_ns;
83629 extern atomic_t nr_ipc_ns;
83630diff --git a/include/linux/irq.h b/include/linux/irq.h
83631index 62af592..cc3b0d0 100644
83632--- a/include/linux/irq.h
83633+++ b/include/linux/irq.h
83634@@ -344,7 +344,8 @@ struct irq_chip {
83635 void (*irq_release_resources)(struct irq_data *data);
83636
83637 unsigned long flags;
83638-};
83639+} __do_const;
83640+typedef struct irq_chip __no_const irq_chip_no_const;
83641
83642 /*
83643 * irq_chip specific flags
83644diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
83645index 45e2d8c..26d85da 100644
83646--- a/include/linux/irqchip/arm-gic.h
83647+++ b/include/linux/irqchip/arm-gic.h
83648@@ -75,9 +75,11 @@
83649
83650 #ifndef __ASSEMBLY__
83651
83652+#include <linux/irq.h>
83653+
83654 struct device_node;
83655
83656-extern struct irq_chip gic_arch_extn;
83657+extern irq_chip_no_const gic_arch_extn;
83658
83659 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
83660 u32 offset, struct device_node *);
83661diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
83662index c367cbd..c9b79e6 100644
83663--- a/include/linux/jiffies.h
83664+++ b/include/linux/jiffies.h
83665@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
83666 /*
83667 * Convert various time units to each other:
83668 */
83669-extern unsigned int jiffies_to_msecs(const unsigned long j);
83670-extern unsigned int jiffies_to_usecs(const unsigned long j);
83671+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
83672+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
83673
83674-static inline u64 jiffies_to_nsecs(const unsigned long j)
83675+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
83676 {
83677 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
83678 }
83679
83680-extern unsigned long msecs_to_jiffies(const unsigned int m);
83681-extern unsigned long usecs_to_jiffies(const unsigned int u);
83682+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
83683+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
83684 extern unsigned long timespec_to_jiffies(const struct timespec *value);
83685 extern void jiffies_to_timespec(const unsigned long jiffies,
83686- struct timespec *value);
83687-extern unsigned long timeval_to_jiffies(const struct timeval *value);
83688+ struct timespec *value) __intentional_overflow(-1);
83689+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
83690 extern void jiffies_to_timeval(const unsigned long jiffies,
83691 struct timeval *value);
83692
83693diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
83694index 6883e19..e854fcb 100644
83695--- a/include/linux/kallsyms.h
83696+++ b/include/linux/kallsyms.h
83697@@ -15,7 +15,8 @@
83698
83699 struct module;
83700
83701-#ifdef CONFIG_KALLSYMS
83702+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
83703+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83704 /* Lookup the address for a symbol. Returns 0 if not found. */
83705 unsigned long kallsyms_lookup_name(const char *name);
83706
83707@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
83708 /* Stupid that this does nothing, but I didn't create this mess. */
83709 #define __print_symbol(fmt, addr)
83710 #endif /*CONFIG_KALLSYMS*/
83711+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
83712+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
83713+extern unsigned long kallsyms_lookup_name(const char *name);
83714+extern void __print_symbol(const char *fmt, unsigned long address);
83715+extern int sprint_backtrace(char *buffer, unsigned long address);
83716+extern int sprint_symbol(char *buffer, unsigned long address);
83717+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
83718+const char *kallsyms_lookup(unsigned long addr,
83719+ unsigned long *symbolsize,
83720+ unsigned long *offset,
83721+ char **modname, char *namebuf);
83722+extern int kallsyms_lookup_size_offset(unsigned long addr,
83723+ unsigned long *symbolsize,
83724+ unsigned long *offset);
83725+#endif
83726
83727 /* This macro allows us to keep printk typechecking */
83728 static __printf(1, 2)
83729diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83730index 44792ee..6172f2a 100644
83731--- a/include/linux/key-type.h
83732+++ b/include/linux/key-type.h
83733@@ -132,7 +132,7 @@ struct key_type {
83734 /* internal fields */
83735 struct list_head link; /* link in types list */
83736 struct lock_class_key lock_class; /* key->sem lock class */
83737-};
83738+} __do_const;
83739
83740 extern struct key_type key_type_keyring;
83741
83742diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83743index 6b06d37..19f605f 100644
83744--- a/include/linux/kgdb.h
83745+++ b/include/linux/kgdb.h
83746@@ -52,7 +52,7 @@ extern int kgdb_connected;
83747 extern int kgdb_io_module_registered;
83748
83749 extern atomic_t kgdb_setting_breakpoint;
83750-extern atomic_t kgdb_cpu_doing_single_step;
83751+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83752
83753 extern struct task_struct *kgdb_usethread;
83754 extern struct task_struct *kgdb_contthread;
83755@@ -254,7 +254,7 @@ struct kgdb_arch {
83756 void (*correct_hw_break)(void);
83757
83758 void (*enable_nmi)(bool on);
83759-};
83760+} __do_const;
83761
83762 /**
83763 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83764@@ -279,11 +279,11 @@ struct kgdb_io {
83765 void (*pre_exception) (void);
83766 void (*post_exception) (void);
83767 int is_console;
83768-};
83769+} __do_const;
83770
83771 extern struct kgdb_arch arch_kgdb_ops;
83772
83773-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
83774+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
83775
83776 #ifdef CONFIG_SERIAL_KGDB_NMI
83777 extern int kgdb_register_nmi_console(void);
83778diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83779index 0555cc6..40116ce 100644
83780--- a/include/linux/kmod.h
83781+++ b/include/linux/kmod.h
83782@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83783 * usually useless though. */
83784 extern __printf(2, 3)
83785 int __request_module(bool wait, const char *name, ...);
83786+extern __printf(3, 4)
83787+int ___request_module(bool wait, char *param_name, const char *name, ...);
83788 #define request_module(mod...) __request_module(true, mod)
83789 #define request_module_nowait(mod...) __request_module(false, mod)
83790 #define try_then_request_module(x, mod...) \
83791@@ -57,6 +59,9 @@ struct subprocess_info {
83792 struct work_struct work;
83793 struct completion *complete;
83794 char *path;
83795+#ifdef CONFIG_GRKERNSEC
83796+ char *origpath;
83797+#endif
83798 char **argv;
83799 char **envp;
83800 int wait;
83801diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83802index 2d61b90..a1d0a13 100644
83803--- a/include/linux/kobject.h
83804+++ b/include/linux/kobject.h
83805@@ -118,7 +118,7 @@ struct kobj_type {
83806 struct attribute **default_attrs;
83807 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83808 const void *(*namespace)(struct kobject *kobj);
83809-};
83810+} __do_const;
83811
83812 struct kobj_uevent_env {
83813 char *argv[3];
83814@@ -142,6 +142,7 @@ struct kobj_attribute {
83815 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83816 const char *buf, size_t count);
83817 };
83818+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83819
83820 extern const struct sysfs_ops kobj_sysfs_ops;
83821
83822@@ -169,7 +170,7 @@ struct kset {
83823 spinlock_t list_lock;
83824 struct kobject kobj;
83825 const struct kset_uevent_ops *uevent_ops;
83826-};
83827+} __randomize_layout;
83828
83829 extern void kset_init(struct kset *kset);
83830 extern int __must_check kset_register(struct kset *kset);
83831diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83832index df32d25..fb52e27 100644
83833--- a/include/linux/kobject_ns.h
83834+++ b/include/linux/kobject_ns.h
83835@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83836 const void *(*netlink_ns)(struct sock *sk);
83837 const void *(*initial_ns)(void);
83838 void (*drop_ns)(void *);
83839-};
83840+} __do_const;
83841
83842 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83843 int kobj_ns_type_registered(enum kobj_ns_type type);
83844diff --git a/include/linux/kref.h b/include/linux/kref.h
83845index 484604d..0f6c5b6 100644
83846--- a/include/linux/kref.h
83847+++ b/include/linux/kref.h
83848@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83849 static inline int kref_sub(struct kref *kref, unsigned int count,
83850 void (*release)(struct kref *kref))
83851 {
83852- WARN_ON(release == NULL);
83853+ BUG_ON(release == NULL);
83854
83855 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83856 release(kref);
83857diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83858index a4c33b3..e854710 100644
83859--- a/include/linux/kvm_host.h
83860+++ b/include/linux/kvm_host.h
83861@@ -452,7 +452,7 @@ static inline void kvm_irqfd_exit(void)
83862 {
83863 }
83864 #endif
83865-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83866+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83867 struct module *module);
83868 void kvm_exit(void);
83869
83870@@ -618,7 +618,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83871 struct kvm_guest_debug *dbg);
83872 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83873
83874-int kvm_arch_init(void *opaque);
83875+int kvm_arch_init(const void *opaque);
83876 void kvm_arch_exit(void);
83877
83878 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83879diff --git a/include/linux/libata.h b/include/linux/libata.h
83880index 92abb49..e7fff2a 100644
83881--- a/include/linux/libata.h
83882+++ b/include/linux/libata.h
83883@@ -976,7 +976,7 @@ struct ata_port_operations {
83884 * fields must be pointers.
83885 */
83886 const struct ata_port_operations *inherits;
83887-};
83888+} __do_const;
83889
83890 struct ata_port_info {
83891 unsigned long flags;
83892diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83893index a6a42dd..6c5ebce 100644
83894--- a/include/linux/linkage.h
83895+++ b/include/linux/linkage.h
83896@@ -36,6 +36,7 @@
83897 #endif
83898
83899 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83900+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83901 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83902
83903 /*
83904diff --git a/include/linux/list.h b/include/linux/list.h
83905index cbbb96f..602d023 100644
83906--- a/include/linux/list.h
83907+++ b/include/linux/list.h
83908@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
83909 extern void list_del(struct list_head *entry);
83910 #endif
83911
83912+extern void __pax_list_add(struct list_head *new,
83913+ struct list_head *prev,
83914+ struct list_head *next);
83915+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83916+{
83917+ __pax_list_add(new, head, head->next);
83918+}
83919+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83920+{
83921+ __pax_list_add(new, head->prev, head);
83922+}
83923+extern void pax_list_del(struct list_head *entry);
83924+
83925 /**
83926 * list_replace - replace old entry by new one
83927 * @old : the element to be replaced
83928@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
83929 INIT_LIST_HEAD(entry);
83930 }
83931
83932+extern void pax_list_del_init(struct list_head *entry);
83933+
83934 /**
83935 * list_move - delete from one list and add as another's head
83936 * @list: the entry to move
83937diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83938index 4bfde0e..d6e2e09 100644
83939--- a/include/linux/lockref.h
83940+++ b/include/linux/lockref.h
83941@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83942 return ((int)l->count < 0);
83943 }
83944
83945+static inline unsigned int __lockref_read(struct lockref *lockref)
83946+{
83947+ return lockref->count;
83948+}
83949+
83950+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83951+{
83952+ lockref->count = count;
83953+}
83954+
83955+static inline void __lockref_inc(struct lockref *lockref)
83956+{
83957+
83958+#ifdef CONFIG_PAX_REFCOUNT
83959+ atomic_inc((atomic_t *)&lockref->count);
83960+#else
83961+ lockref->count++;
83962+#endif
83963+
83964+}
83965+
83966+static inline void __lockref_dec(struct lockref *lockref)
83967+{
83968+
83969+#ifdef CONFIG_PAX_REFCOUNT
83970+ atomic_dec((atomic_t *)&lockref->count);
83971+#else
83972+ lockref->count--;
83973+#endif
83974+
83975+}
83976+
83977 #endif /* __LINUX_LOCKREF_H */
83978diff --git a/include/linux/math64.h b/include/linux/math64.h
83979index c45c089..298841c 100644
83980--- a/include/linux/math64.h
83981+++ b/include/linux/math64.h
83982@@ -15,7 +15,7 @@
83983 * This is commonly provided by 32bit archs to provide an optimized 64bit
83984 * divide.
83985 */
83986-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83987+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83988 {
83989 *remainder = dividend % divisor;
83990 return dividend / divisor;
83991@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83992 /**
83993 * div64_u64 - unsigned 64bit divide with 64bit divisor
83994 */
83995-static inline u64 div64_u64(u64 dividend, u64 divisor)
83996+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83997 {
83998 return dividend / divisor;
83999 }
84000@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84001 #define div64_ul(x, y) div_u64((x), (y))
84002
84003 #ifndef div_u64_rem
84004-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84005+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84006 {
84007 *remainder = do_div(dividend, divisor);
84008 return dividend;
84009@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84010 #endif
84011
84012 #ifndef div64_u64
84013-extern u64 div64_u64(u64 dividend, u64 divisor);
84014+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84015 #endif
84016
84017 #ifndef div64_s64
84018@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84019 * divide.
84020 */
84021 #ifndef div_u64
84022-static inline u64 div_u64(u64 dividend, u32 divisor)
84023+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84024 {
84025 u32 remainder;
84026 return div_u64_rem(dividend, divisor, &remainder);
84027diff --git a/include/linux/memory.h b/include/linux/memory.h
84028index bb7384e..8b8d8d1 100644
84029--- a/include/linux/memory.h
84030+++ b/include/linux/memory.h
84031@@ -35,7 +35,7 @@ struct memory_block {
84032 };
84033
84034 int arch_get_memory_phys_device(unsigned long start_pfn);
84035-unsigned long __weak memory_block_size_bytes(void);
84036+unsigned long memory_block_size_bytes(void);
84037
84038 /* These states are exposed to userspace as text strings in sysfs */
84039 #define MEM_ONLINE (1<<0) /* exposed to userspace */
84040diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84041index f230a97..714c006 100644
84042--- a/include/linux/mempolicy.h
84043+++ b/include/linux/mempolicy.h
84044@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84045 }
84046
84047 #define vma_policy(vma) ((vma)->vm_policy)
84048+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84049+{
84050+ vma->vm_policy = pol;
84051+}
84052
84053 static inline void mpol_get(struct mempolicy *pol)
84054 {
84055@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84056 }
84057
84058 #define vma_policy(vma) NULL
84059+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84060+{
84061+}
84062
84063 static inline int
84064 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
84065diff --git a/include/linux/mm.h b/include/linux/mm.h
84066index 16e6f1e..d79d2f1 100644
84067--- a/include/linux/mm.h
84068+++ b/include/linux/mm.h
84069@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
84070 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84071 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
84072 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
84073+
84074+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84075+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
84076+#endif
84077+
84078 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
84079
84080 #ifdef CONFIG_MEM_SOFT_DIRTY
84081@@ -237,8 +242,8 @@ struct vm_operations_struct {
84082 /* called by access_process_vm when get_user_pages() fails, typically
84083 * for use by special VMAs that can switch between memory and hardware
84084 */
84085- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84086- void *buf, int len, int write);
84087+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84088+ void *buf, size_t len, int write);
84089
84090 /* Called by the /proc/PID/maps code to ask the vma whether it
84091 * has a special name. Returning non-NULL will also cause this
84092@@ -274,6 +279,7 @@ struct vm_operations_struct {
84093 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
84094 unsigned long size, pgoff_t pgoff);
84095 };
84096+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84097
84098 struct mmu_gather;
84099 struct inode;
84100@@ -1163,8 +1169,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84101 unsigned long *pfn);
84102 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84103 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84104-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84105- void *buf, int len, int write);
84106+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84107+ void *buf, size_t len, int write);
84108
84109 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84110 loff_t const holebegin, loff_t const holelen)
84111@@ -1203,9 +1209,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84112 }
84113 #endif
84114
84115-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84116-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84117- void *buf, int len, int write);
84118+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84119+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84120+ void *buf, size_t len, int write);
84121
84122 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84123 unsigned long start, unsigned long nr_pages,
84124@@ -1238,34 +1244,6 @@ int set_page_dirty_lock(struct page *page);
84125 int clear_page_dirty_for_io(struct page *page);
84126 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84127
84128-/* Is the vma a continuation of the stack vma above it? */
84129-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84130-{
84131- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84132-}
84133-
84134-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84135- unsigned long addr)
84136-{
84137- return (vma->vm_flags & VM_GROWSDOWN) &&
84138- (vma->vm_start == addr) &&
84139- !vma_growsdown(vma->vm_prev, addr);
84140-}
84141-
84142-/* Is the vma a continuation of the stack vma below it? */
84143-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84144-{
84145- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84146-}
84147-
84148-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84149- unsigned long addr)
84150-{
84151- return (vma->vm_flags & VM_GROWSUP) &&
84152- (vma->vm_end == addr) &&
84153- !vma_growsup(vma->vm_next, addr);
84154-}
84155-
84156 extern pid_t
84157 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
84158
84159@@ -1365,6 +1343,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
84160 }
84161 #endif
84162
84163+#ifdef CONFIG_MMU
84164+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84165+#else
84166+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84167+{
84168+ return __pgprot(0);
84169+}
84170+#endif
84171+
84172 int vma_wants_writenotify(struct vm_area_struct *vma);
84173
84174 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
84175@@ -1383,8 +1370,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84176 {
84177 return 0;
84178 }
84179+
84180+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84181+ unsigned long address)
84182+{
84183+ return 0;
84184+}
84185 #else
84186 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84187+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84188 #endif
84189
84190 #ifdef __PAGETABLE_PMD_FOLDED
84191@@ -1393,8 +1387,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84192 {
84193 return 0;
84194 }
84195+
84196+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84197+ unsigned long address)
84198+{
84199+ return 0;
84200+}
84201 #else
84202 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84203+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84204 #endif
84205
84206 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
84207@@ -1412,11 +1413,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84208 NULL: pud_offset(pgd, address);
84209 }
84210
84211+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84212+{
84213+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84214+ NULL: pud_offset(pgd, address);
84215+}
84216+
84217 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84218 {
84219 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84220 NULL: pmd_offset(pud, address);
84221 }
84222+
84223+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84224+{
84225+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84226+ NULL: pmd_offset(pud, address);
84227+}
84228 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84229
84230 #if USE_SPLIT_PTE_PTLOCKS
84231@@ -1815,7 +1828,7 @@ extern int install_special_mapping(struct mm_struct *mm,
84232 unsigned long addr, unsigned long len,
84233 unsigned long flags, struct page **pages);
84234
84235-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
84236+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
84237
84238 extern unsigned long mmap_region(struct file *file, unsigned long addr,
84239 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
84240@@ -1823,6 +1836,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
84241 unsigned long len, unsigned long prot, unsigned long flags,
84242 unsigned long pgoff, unsigned long *populate);
84243 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
84244+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
84245
84246 #ifdef CONFIG_MMU
84247 extern int __mm_populate(unsigned long addr, unsigned long len,
84248@@ -1851,10 +1865,11 @@ struct vm_unmapped_area_info {
84249 unsigned long high_limit;
84250 unsigned long align_mask;
84251 unsigned long align_offset;
84252+ unsigned long threadstack_offset;
84253 };
84254
84255-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
84256-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84257+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
84258+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
84259
84260 /*
84261 * Search for an unmapped address range.
84262@@ -1866,7 +1881,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84263 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
84264 */
84265 static inline unsigned long
84266-vm_unmapped_area(struct vm_unmapped_area_info *info)
84267+vm_unmapped_area(const struct vm_unmapped_area_info *info)
84268 {
84269 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
84270 return unmapped_area(info);
84271@@ -1928,6 +1943,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
84272 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
84273 struct vm_area_struct **pprev);
84274
84275+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
84276+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
84277+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
84278+
84279 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
84280 NULL if none. Assume start_addr < end_addr. */
84281 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84282@@ -1956,15 +1975,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84283 return vma;
84284 }
84285
84286-#ifdef CONFIG_MMU
84287-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84288-#else
84289-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84290-{
84291- return __pgprot(0);
84292-}
84293-#endif
84294-
84295 #ifdef CONFIG_NUMA_BALANCING
84296 unsigned long change_prot_numa(struct vm_area_struct *vma,
84297 unsigned long start, unsigned long end);
84298@@ -2016,6 +2026,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84299 static inline void vm_stat_account(struct mm_struct *mm,
84300 unsigned long flags, struct file *file, long pages)
84301 {
84302+
84303+#ifdef CONFIG_PAX_RANDMMAP
84304+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
84305+#endif
84306+
84307 mm->total_vm += pages;
84308 }
84309 #endif /* CONFIG_PROC_FS */
84310@@ -2104,7 +2119,7 @@ extern int unpoison_memory(unsigned long pfn);
84311 extern int sysctl_memory_failure_early_kill;
84312 extern int sysctl_memory_failure_recovery;
84313 extern void shake_page(struct page *p, int access);
84314-extern atomic_long_t num_poisoned_pages;
84315+extern atomic_long_unchecked_t num_poisoned_pages;
84316 extern int soft_offline_page(struct page *page, int flags);
84317
84318 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
84319@@ -2139,5 +2154,11 @@ void __init setup_nr_node_ids(void);
84320 static inline void setup_nr_node_ids(void) {}
84321 #endif
84322
84323+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84324+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
84325+#else
84326+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
84327+#endif
84328+
84329 #endif /* __KERNEL__ */
84330 #endif /* _LINUX_MM_H */
84331diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
84332index 6e0b286..90d9c0d 100644
84333--- a/include/linux/mm_types.h
84334+++ b/include/linux/mm_types.h
84335@@ -308,7 +308,9 @@ struct vm_area_struct {
84336 #ifdef CONFIG_NUMA
84337 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
84338 #endif
84339-};
84340+
84341+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
84342+} __randomize_layout;
84343
84344 struct core_thread {
84345 struct task_struct *task;
84346@@ -454,7 +456,25 @@ struct mm_struct {
84347 bool tlb_flush_pending;
84348 #endif
84349 struct uprobes_state uprobes_state;
84350-};
84351+
84352+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84353+ unsigned long pax_flags;
84354+#endif
84355+
84356+#ifdef CONFIG_PAX_DLRESOLVE
84357+ unsigned long call_dl_resolve;
84358+#endif
84359+
84360+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
84361+ unsigned long call_syscall;
84362+#endif
84363+
84364+#ifdef CONFIG_PAX_ASLR
84365+ unsigned long delta_mmap; /* randomized offset */
84366+ unsigned long delta_stack; /* randomized offset */
84367+#endif
84368+
84369+} __randomize_layout;
84370
84371 static inline void mm_init_cpumask(struct mm_struct *mm)
84372 {
84373diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
84374index c5d5278..f0b68c8 100644
84375--- a/include/linux/mmiotrace.h
84376+++ b/include/linux/mmiotrace.h
84377@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
84378 /* Called from ioremap.c */
84379 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
84380 void __iomem *addr);
84381-extern void mmiotrace_iounmap(volatile void __iomem *addr);
84382+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
84383
84384 /* For anyone to insert markers. Remember trailing newline. */
84385 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
84386@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
84387 {
84388 }
84389
84390-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
84391+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
84392 {
84393 }
84394
84395diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
84396index 318df70..b74ec01 100644
84397--- a/include/linux/mmzone.h
84398+++ b/include/linux/mmzone.h
84399@@ -518,7 +518,7 @@ struct zone {
84400
84401 ZONE_PADDING(_pad3_)
84402 /* Zone statistics */
84403- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84404+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84405 } ____cacheline_internodealigned_in_smp;
84406
84407 typedef enum {
84408diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
84409index 44eeef0..a92d3f9 100644
84410--- a/include/linux/mod_devicetable.h
84411+++ b/include/linux/mod_devicetable.h
84412@@ -139,7 +139,7 @@ struct usb_device_id {
84413 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
84414 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
84415
84416-#define HID_ANY_ID (~0)
84417+#define HID_ANY_ID (~0U)
84418 #define HID_BUS_ANY 0xffff
84419 #define HID_GROUP_ANY 0x0000
84420
84421@@ -475,7 +475,7 @@ struct dmi_system_id {
84422 const char *ident;
84423 struct dmi_strmatch matches[4];
84424 void *driver_data;
84425-};
84426+} __do_const;
84427 /*
84428 * struct dmi_device_id appears during expansion of
84429 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
84430diff --git a/include/linux/module.h b/include/linux/module.h
84431index 71f282a..b2387e2 100644
84432--- a/include/linux/module.h
84433+++ b/include/linux/module.h
84434@@ -17,9 +17,11 @@
84435 #include <linux/moduleparam.h>
84436 #include <linux/jump_label.h>
84437 #include <linux/export.h>
84438+#include <linux/fs.h>
84439
84440 #include <linux/percpu.h>
84441 #include <asm/module.h>
84442+#include <asm/pgtable.h>
84443
84444 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
84445 #define MODULE_SIG_STRING "~Module signature appended~\n"
84446@@ -42,7 +44,7 @@ struct module_kobject {
84447 struct kobject *drivers_dir;
84448 struct module_param_attrs *mp;
84449 struct completion *kobj_completion;
84450-};
84451+} __randomize_layout;
84452
84453 struct module_attribute {
84454 struct attribute attr;
84455@@ -54,12 +56,13 @@ struct module_attribute {
84456 int (*test)(struct module *);
84457 void (*free)(struct module *);
84458 };
84459+typedef struct module_attribute __no_const module_attribute_no_const;
84460
84461 struct module_version_attribute {
84462 struct module_attribute mattr;
84463 const char *module_name;
84464 const char *version;
84465-} __attribute__ ((__aligned__(sizeof(void *))));
84466+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
84467
84468 extern ssize_t __modver_version_show(struct module_attribute *,
84469 struct module_kobject *, char *);
84470@@ -235,7 +238,7 @@ struct module {
84471
84472 /* Sysfs stuff. */
84473 struct module_kobject mkobj;
84474- struct module_attribute *modinfo_attrs;
84475+ module_attribute_no_const *modinfo_attrs;
84476 const char *version;
84477 const char *srcversion;
84478 struct kobject *holders_dir;
84479@@ -284,19 +287,16 @@ struct module {
84480 int (*init)(void);
84481
84482 /* If this is non-NULL, vfree after init() returns */
84483- void *module_init;
84484+ void *module_init_rx, *module_init_rw;
84485
84486 /* Here is the actual code + data, vfree'd on unload. */
84487- void *module_core;
84488+ void *module_core_rx, *module_core_rw;
84489
84490 /* Here are the sizes of the init and core sections */
84491- unsigned int init_size, core_size;
84492+ unsigned int init_size_rw, core_size_rw;
84493
84494 /* The size of the executable code in each section. */
84495- unsigned int init_text_size, core_text_size;
84496-
84497- /* Size of RO sections of the module (text+rodata) */
84498- unsigned int init_ro_size, core_ro_size;
84499+ unsigned int init_size_rx, core_size_rx;
84500
84501 /* Arch-specific module values */
84502 struct mod_arch_specific arch;
84503@@ -352,6 +352,10 @@ struct module {
84504 #ifdef CONFIG_EVENT_TRACING
84505 struct ftrace_event_call **trace_events;
84506 unsigned int num_trace_events;
84507+ struct file_operations trace_id;
84508+ struct file_operations trace_enable;
84509+ struct file_operations trace_format;
84510+ struct file_operations trace_filter;
84511 #endif
84512 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
84513 unsigned int num_ftrace_callsites;
84514@@ -375,7 +379,7 @@ struct module {
84515 ctor_fn_t *ctors;
84516 unsigned int num_ctors;
84517 #endif
84518-};
84519+} __randomize_layout;
84520 #ifndef MODULE_ARCH_INIT
84521 #define MODULE_ARCH_INIT {}
84522 #endif
84523@@ -396,18 +400,48 @@ bool is_module_address(unsigned long addr);
84524 bool is_module_percpu_address(unsigned long addr);
84525 bool is_module_text_address(unsigned long addr);
84526
84527+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
84528+{
84529+
84530+#ifdef CONFIG_PAX_KERNEXEC
84531+ if (ktla_ktva(addr) >= (unsigned long)start &&
84532+ ktla_ktva(addr) < (unsigned long)start + size)
84533+ return 1;
84534+#endif
84535+
84536+ return ((void *)addr >= start && (void *)addr < start + size);
84537+}
84538+
84539+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
84540+{
84541+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
84542+}
84543+
84544+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
84545+{
84546+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
84547+}
84548+
84549+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
84550+{
84551+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
84552+}
84553+
84554+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
84555+{
84556+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
84557+}
84558+
84559 static inline bool within_module_core(unsigned long addr,
84560 const struct module *mod)
84561 {
84562- return (unsigned long)mod->module_core <= addr &&
84563- addr < (unsigned long)mod->module_core + mod->core_size;
84564+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
84565 }
84566
84567 static inline bool within_module_init(unsigned long addr,
84568 const struct module *mod)
84569 {
84570- return (unsigned long)mod->module_init <= addr &&
84571- addr < (unsigned long)mod->module_init + mod->init_size;
84572+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
84573 }
84574
84575 static inline bool within_module(unsigned long addr, const struct module *mod)
84576diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
84577index 7eeb9bb..68f37e0 100644
84578--- a/include/linux/moduleloader.h
84579+++ b/include/linux/moduleloader.h
84580@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
84581 sections. Returns NULL on failure. */
84582 void *module_alloc(unsigned long size);
84583
84584+#ifdef CONFIG_PAX_KERNEXEC
84585+void *module_alloc_exec(unsigned long size);
84586+#else
84587+#define module_alloc_exec(x) module_alloc(x)
84588+#endif
84589+
84590 /* Free memory returned from module_alloc. */
84591 void module_free(struct module *mod, void *module_region);
84592
84593+#ifdef CONFIG_PAX_KERNEXEC
84594+void module_free_exec(struct module *mod, void *module_region);
84595+#else
84596+#define module_free_exec(x, y) module_free((x), (y))
84597+#endif
84598+
84599 /*
84600 * Apply the given relocation to the (simplified) ELF. Return -error
84601 * or 0.
84602@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
84603 unsigned int relsec,
84604 struct module *me)
84605 {
84606+#ifdef CONFIG_MODULES
84607 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84608 module_name(me));
84609+#endif
84610 return -ENOEXEC;
84611 }
84612 #endif
84613@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
84614 unsigned int relsec,
84615 struct module *me)
84616 {
84617+#ifdef CONFIG_MODULES
84618 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84619 module_name(me));
84620+#endif
84621 return -ENOEXEC;
84622 }
84623 #endif
84624diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
84625index 494f99e..5059f63 100644
84626--- a/include/linux/moduleparam.h
84627+++ b/include/linux/moduleparam.h
84628@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
84629 * @len is usually just sizeof(string).
84630 */
84631 #define module_param_string(name, string, len, perm) \
84632- static const struct kparam_string __param_string_##name \
84633+ static const struct kparam_string __param_string_##name __used \
84634 = { len, string }; \
84635 __module_param_call(MODULE_PARAM_PREFIX, name, \
84636 &param_ops_string, \
84637@@ -437,7 +437,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
84638 */
84639 #define module_param_array_named(name, array, type, nump, perm) \
84640 param_check_##type(name, &(array)[0]); \
84641- static const struct kparam_array __param_arr_##name \
84642+ static const struct kparam_array __param_arr_##name __used \
84643 = { .max = ARRAY_SIZE(array), .num = nump, \
84644 .ops = &param_ops_##type, \
84645 .elemsize = sizeof(array[0]), .elem = array }; \
84646diff --git a/include/linux/mount.h b/include/linux/mount.h
84647index 9262e4b..0a45f98 100644
84648--- a/include/linux/mount.h
84649+++ b/include/linux/mount.h
84650@@ -66,7 +66,7 @@ struct vfsmount {
84651 struct dentry *mnt_root; /* root of the mounted tree */
84652 struct super_block *mnt_sb; /* pointer to superblock */
84653 int mnt_flags;
84654-};
84655+} __randomize_layout;
84656
84657 struct file; /* forward dec */
84658 struct path;
84659diff --git a/include/linux/namei.h b/include/linux/namei.h
84660index 492de72..1bddcd4 100644
84661--- a/include/linux/namei.h
84662+++ b/include/linux/namei.h
84663@@ -19,7 +19,7 @@ struct nameidata {
84664 unsigned seq, m_seq;
84665 int last_type;
84666 unsigned depth;
84667- char *saved_names[MAX_NESTED_LINKS + 1];
84668+ const char *saved_names[MAX_NESTED_LINKS + 1];
84669 };
84670
84671 /*
84672@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
84673
84674 extern void nd_jump_link(struct nameidata *nd, struct path *path);
84675
84676-static inline void nd_set_link(struct nameidata *nd, char *path)
84677+static inline void nd_set_link(struct nameidata *nd, const char *path)
84678 {
84679 nd->saved_names[nd->depth] = path;
84680 }
84681
84682-static inline char *nd_get_link(struct nameidata *nd)
84683+static inline const char *nd_get_link(const struct nameidata *nd)
84684 {
84685 return nd->saved_names[nd->depth];
84686 }
84687diff --git a/include/linux/net.h b/include/linux/net.h
84688index 17d8339..81656c0 100644
84689--- a/include/linux/net.h
84690+++ b/include/linux/net.h
84691@@ -192,7 +192,7 @@ struct net_proto_family {
84692 int (*create)(struct net *net, struct socket *sock,
84693 int protocol, int kern);
84694 struct module *owner;
84695-};
84696+} __do_const;
84697
84698 struct iovec;
84699 struct kvec;
84700diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
84701index c8e388e..5d8cd9b 100644
84702--- a/include/linux/netdevice.h
84703+++ b/include/linux/netdevice.h
84704@@ -1147,6 +1147,7 @@ struct net_device_ops {
84705 void *priv);
84706 int (*ndo_get_lock_subclass)(struct net_device *dev);
84707 };
84708+typedef struct net_device_ops __no_const net_device_ops_no_const;
84709
84710 /**
84711 * enum net_device_priv_flags - &struct net_device priv_flags
84712@@ -1485,10 +1486,10 @@ struct net_device {
84713
84714 struct net_device_stats stats;
84715
84716- atomic_long_t rx_dropped;
84717- atomic_long_t tx_dropped;
84718+ atomic_long_unchecked_t rx_dropped;
84719+ atomic_long_unchecked_t tx_dropped;
84720
84721- atomic_t carrier_changes;
84722+ atomic_unchecked_t carrier_changes;
84723
84724 #ifdef CONFIG_WIRELESS_EXT
84725 const struct iw_handler_def * wireless_handlers;
84726diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
84727index 2517ece..0bbfcfb 100644
84728--- a/include/linux/netfilter.h
84729+++ b/include/linux/netfilter.h
84730@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
84731 #endif
84732 /* Use the module struct to lock set/get code in place */
84733 struct module *owner;
84734-};
84735+} __do_const;
84736
84737 /* Function to register/unregister hook points. */
84738 int nf_register_hook(struct nf_hook_ops *reg);
84739diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84740index e955d47..04a5338 100644
84741--- a/include/linux/netfilter/nfnetlink.h
84742+++ b/include/linux/netfilter/nfnetlink.h
84743@@ -19,7 +19,7 @@ struct nfnl_callback {
84744 const struct nlattr * const cda[]);
84745 const struct nla_policy *policy; /* netlink attribute policy */
84746 const u_int16_t attr_count; /* number of nlattr's */
84747-};
84748+} __do_const;
84749
84750 struct nfnetlink_subsystem {
84751 const char *name;
84752diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84753new file mode 100644
84754index 0000000..33f4af8
84755--- /dev/null
84756+++ b/include/linux/netfilter/xt_gradm.h
84757@@ -0,0 +1,9 @@
84758+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84759+#define _LINUX_NETFILTER_XT_GRADM_H 1
84760+
84761+struct xt_gradm_mtinfo {
84762+ __u16 flags;
84763+ __u16 invflags;
84764+};
84765+
84766+#endif
84767diff --git a/include/linux/nls.h b/include/linux/nls.h
84768index 520681b..2b7fabb 100644
84769--- a/include/linux/nls.h
84770+++ b/include/linux/nls.h
84771@@ -31,7 +31,7 @@ struct nls_table {
84772 const unsigned char *charset2upper;
84773 struct module *owner;
84774 struct nls_table *next;
84775-};
84776+} __do_const;
84777
84778 /* this value hold the maximum octet of charset */
84779 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84780@@ -46,7 +46,7 @@ enum utf16_endian {
84781 /* nls_base.c */
84782 extern int __register_nls(struct nls_table *, struct module *);
84783 extern int unregister_nls(struct nls_table *);
84784-extern struct nls_table *load_nls(char *);
84785+extern struct nls_table *load_nls(const char *);
84786 extern void unload_nls(struct nls_table *);
84787 extern struct nls_table *load_nls_default(void);
84788 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84789diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84790index d14a4c3..a078786 100644
84791--- a/include/linux/notifier.h
84792+++ b/include/linux/notifier.h
84793@@ -54,7 +54,8 @@ struct notifier_block {
84794 notifier_fn_t notifier_call;
84795 struct notifier_block __rcu *next;
84796 int priority;
84797-};
84798+} __do_const;
84799+typedef struct notifier_block __no_const notifier_block_no_const;
84800
84801 struct atomic_notifier_head {
84802 spinlock_t lock;
84803diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84804index b2a0f15..4d7da32 100644
84805--- a/include/linux/oprofile.h
84806+++ b/include/linux/oprofile.h
84807@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84808 int oprofilefs_create_ro_ulong(struct dentry * root,
84809 char const * name, ulong * val);
84810
84811-/** Create a file for read-only access to an atomic_t. */
84812+/** Create a file for read-only access to an atomic_unchecked_t. */
84813 int oprofilefs_create_ro_atomic(struct dentry * root,
84814- char const * name, atomic_t * val);
84815+ char const * name, atomic_unchecked_t * val);
84816
84817 /** create a directory */
84818 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84819diff --git a/include/linux/padata.h b/include/linux/padata.h
84820index 4386946..f50c615 100644
84821--- a/include/linux/padata.h
84822+++ b/include/linux/padata.h
84823@@ -129,7 +129,7 @@ struct parallel_data {
84824 struct padata_serial_queue __percpu *squeue;
84825 atomic_t reorder_objects;
84826 atomic_t refcnt;
84827- atomic_t seq_nr;
84828+ atomic_unchecked_t seq_nr;
84829 struct padata_cpumask cpumask;
84830 spinlock_t lock ____cacheline_aligned;
84831 unsigned int processed;
84832diff --git a/include/linux/path.h b/include/linux/path.h
84833index d137218..be0c176 100644
84834--- a/include/linux/path.h
84835+++ b/include/linux/path.h
84836@@ -1,13 +1,15 @@
84837 #ifndef _LINUX_PATH_H
84838 #define _LINUX_PATH_H
84839
84840+#include <linux/compiler.h>
84841+
84842 struct dentry;
84843 struct vfsmount;
84844
84845 struct path {
84846 struct vfsmount *mnt;
84847 struct dentry *dentry;
84848-};
84849+} __randomize_layout;
84850
84851 extern void path_get(const struct path *);
84852 extern void path_put(const struct path *);
84853diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84854index 5f2e559..7d59314 100644
84855--- a/include/linux/pci_hotplug.h
84856+++ b/include/linux/pci_hotplug.h
84857@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84858 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84859 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84860 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84861-};
84862+} __do_const;
84863+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84864
84865 /**
84866 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84867diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84868index 707617a..28a2e7e 100644
84869--- a/include/linux/perf_event.h
84870+++ b/include/linux/perf_event.h
84871@@ -339,8 +339,8 @@ struct perf_event {
84872
84873 enum perf_event_active_state state;
84874 unsigned int attach_state;
84875- local64_t count;
84876- atomic64_t child_count;
84877+ local64_t count; /* PaX: fix it one day */
84878+ atomic64_unchecked_t child_count;
84879
84880 /*
84881 * These are the total time in nanoseconds that the event
84882@@ -391,8 +391,8 @@ struct perf_event {
84883 * These accumulate total time (in nanoseconds) that children
84884 * events have been enabled and running, respectively.
84885 */
84886- atomic64_t child_total_time_enabled;
84887- atomic64_t child_total_time_running;
84888+ atomic64_unchecked_t child_total_time_enabled;
84889+ atomic64_unchecked_t child_total_time_running;
84890
84891 /*
84892 * Protect attach/detach and child_list:
84893@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84894 entry->ip[entry->nr++] = ip;
84895 }
84896
84897-extern int sysctl_perf_event_paranoid;
84898+extern int sysctl_perf_event_legitimately_concerned;
84899 extern int sysctl_perf_event_mlock;
84900 extern int sysctl_perf_event_sample_rate;
84901 extern int sysctl_perf_cpu_time_max_percent;
84902@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84903 loff_t *ppos);
84904
84905
84906+static inline bool perf_paranoid_any(void)
84907+{
84908+ return sysctl_perf_event_legitimately_concerned > 2;
84909+}
84910+
84911 static inline bool perf_paranoid_tracepoint_raw(void)
84912 {
84913- return sysctl_perf_event_paranoid > -1;
84914+ return sysctl_perf_event_legitimately_concerned > -1;
84915 }
84916
84917 static inline bool perf_paranoid_cpu(void)
84918 {
84919- return sysctl_perf_event_paranoid > 0;
84920+ return sysctl_perf_event_legitimately_concerned > 0;
84921 }
84922
84923 static inline bool perf_paranoid_kernel(void)
84924 {
84925- return sysctl_perf_event_paranoid > 1;
84926+ return sysctl_perf_event_legitimately_concerned > 1;
84927 }
84928
84929 extern void perf_event_init(void);
84930@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
84931 struct device_attribute attr;
84932 u64 id;
84933 const char *event_str;
84934-};
84935+} __do_const;
84936
84937 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84938 static struct perf_pmu_events_attr _var = { \
84939diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84940index 1997ffc..4f1f44d 100644
84941--- a/include/linux/pid_namespace.h
84942+++ b/include/linux/pid_namespace.h
84943@@ -44,7 +44,7 @@ struct pid_namespace {
84944 int hide_pid;
84945 int reboot; /* group exit code if this pidns was rebooted */
84946 unsigned int proc_inum;
84947-};
84948+} __randomize_layout;
84949
84950 extern struct pid_namespace init_pid_ns;
84951
84952diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84953index eb8b8ac..62649e1 100644
84954--- a/include/linux/pipe_fs_i.h
84955+++ b/include/linux/pipe_fs_i.h
84956@@ -47,10 +47,10 @@ struct pipe_inode_info {
84957 struct mutex mutex;
84958 wait_queue_head_t wait;
84959 unsigned int nrbufs, curbuf, buffers;
84960- unsigned int readers;
84961- unsigned int writers;
84962- unsigned int files;
84963- unsigned int waiting_writers;
84964+ atomic_t readers;
84965+ atomic_t writers;
84966+ atomic_t files;
84967+ atomic_t waiting_writers;
84968 unsigned int r_counter;
84969 unsigned int w_counter;
84970 struct page *tmp_page;
84971diff --git a/include/linux/pm.h b/include/linux/pm.h
84972index 72c0fe0..26918ed 100644
84973--- a/include/linux/pm.h
84974+++ b/include/linux/pm.h
84975@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
84976 struct dev_pm_domain {
84977 struct dev_pm_ops ops;
84978 };
84979+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84980
84981 /*
84982 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84983diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84984index ebc4c76..7fab7b0 100644
84985--- a/include/linux/pm_domain.h
84986+++ b/include/linux/pm_domain.h
84987@@ -44,11 +44,11 @@ struct gpd_dev_ops {
84988 int (*thaw_early)(struct device *dev);
84989 int (*thaw)(struct device *dev);
84990 bool (*active_wakeup)(struct device *dev);
84991-};
84992+} __no_const;
84993
84994 struct gpd_cpu_data {
84995 unsigned int saved_exit_latency;
84996- struct cpuidle_state *idle_state;
84997+ cpuidle_state_no_const *idle_state;
84998 };
84999
85000 struct generic_pm_domain {
85001diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85002index 367f49b..d2f5a14 100644
85003--- a/include/linux/pm_runtime.h
85004+++ b/include/linux/pm_runtime.h
85005@@ -125,7 +125,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85006
85007 static inline void pm_runtime_mark_last_busy(struct device *dev)
85008 {
85009- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85010+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85011 }
85012
85013 #else /* !CONFIG_PM_RUNTIME */
85014diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85015index 195aafc..49a7bc2 100644
85016--- a/include/linux/pnp.h
85017+++ b/include/linux/pnp.h
85018@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85019 struct pnp_fixup {
85020 char id[7];
85021 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85022-};
85023+} __do_const;
85024
85025 /* config parameters */
85026 #define PNP_CONFIG_NORMAL 0x0001
85027diff --git a/include/linux/poison.h b/include/linux/poison.h
85028index 2110a81..13a11bb 100644
85029--- a/include/linux/poison.h
85030+++ b/include/linux/poison.h
85031@@ -19,8 +19,8 @@
85032 * under normal circumstances, used to verify that nobody uses
85033 * non-initialized list entries.
85034 */
85035-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85036-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85037+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85038+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85039
85040 /********** include/linux/timer.h **********/
85041 /*
85042diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85043index d8b187c3..9a9257a 100644
85044--- a/include/linux/power/smartreflex.h
85045+++ b/include/linux/power/smartreflex.h
85046@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85047 int (*notify)(struct omap_sr *sr, u32 status);
85048 u8 notify_flags;
85049 u8 class_type;
85050-};
85051+} __do_const;
85052
85053 /**
85054 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85055diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85056index 4ea1d37..80f4b33 100644
85057--- a/include/linux/ppp-comp.h
85058+++ b/include/linux/ppp-comp.h
85059@@ -84,7 +84,7 @@ struct compressor {
85060 struct module *owner;
85061 /* Extra skb space needed by the compressor algorithm */
85062 unsigned int comp_extra;
85063-};
85064+} __do_const;
85065
85066 /*
85067 * The return value from decompress routine is the length of the
85068diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85069index de83b4e..c4b997d 100644
85070--- a/include/linux/preempt.h
85071+++ b/include/linux/preempt.h
85072@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85073 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85074 #endif
85075
85076+#define raw_preempt_count_add(val) __preempt_count_add(val)
85077+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85078+
85079 #define __preempt_count_inc() __preempt_count_add(1)
85080 #define __preempt_count_dec() __preempt_count_sub(1)
85081
85082 #define preempt_count_inc() preempt_count_add(1)
85083+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85084 #define preempt_count_dec() preempt_count_sub(1)
85085+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85086
85087 #ifdef CONFIG_PREEMPT_COUNT
85088
85089@@ -41,6 +46,12 @@ do { \
85090 barrier(); \
85091 } while (0)
85092
85093+#define raw_preempt_disable() \
85094+do { \
85095+ raw_preempt_count_inc(); \
85096+ barrier(); \
85097+} while (0)
85098+
85099 #define sched_preempt_enable_no_resched() \
85100 do { \
85101 barrier(); \
85102@@ -49,6 +60,12 @@ do { \
85103
85104 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85105
85106+#define raw_preempt_enable_no_resched() \
85107+do { \
85108+ barrier(); \
85109+ raw_preempt_count_dec(); \
85110+} while (0)
85111+
85112 #ifdef CONFIG_PREEMPT
85113 #define preempt_enable() \
85114 do { \
85115@@ -113,8 +130,10 @@ do { \
85116 * region.
85117 */
85118 #define preempt_disable() barrier()
85119+#define raw_preempt_disable() barrier()
85120 #define sched_preempt_enable_no_resched() barrier()
85121 #define preempt_enable_no_resched() barrier()
85122+#define raw_preempt_enable_no_resched() barrier()
85123 #define preempt_enable() barrier()
85124 #define preempt_check_resched() do { } while (0)
85125
85126@@ -128,11 +147,13 @@ do { \
85127 /*
85128 * Modules have no business playing preemption tricks.
85129 */
85130+#ifndef CONFIG_PAX_KERNEXEC
85131 #undef sched_preempt_enable_no_resched
85132 #undef preempt_enable_no_resched
85133 #undef preempt_enable_no_resched_notrace
85134 #undef preempt_check_resched
85135 #endif
85136+#endif
85137
85138 #define preempt_set_need_resched() \
85139 do { \
85140diff --git a/include/linux/printk.h b/include/linux/printk.h
85141index d78125f..7f36596 100644
85142--- a/include/linux/printk.h
85143+++ b/include/linux/printk.h
85144@@ -124,6 +124,8 @@ static inline __printf(1, 2) __cold
85145 void early_printk(const char *s, ...) { }
85146 #endif
85147
85148+extern int kptr_restrict;
85149+
85150 #ifdef CONFIG_PRINTK
85151 asmlinkage __printf(5, 0)
85152 int vprintk_emit(int facility, int level,
85153@@ -158,7 +160,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85154
85155 extern int printk_delay_msec;
85156 extern int dmesg_restrict;
85157-extern int kptr_restrict;
85158
85159 extern void wake_up_klogd(void);
85160
85161diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85162index 9d117f6..d832b31 100644
85163--- a/include/linux/proc_fs.h
85164+++ b/include/linux/proc_fs.h
85165@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85166 extern struct proc_dir_entry *proc_symlink(const char *,
85167 struct proc_dir_entry *, const char *);
85168 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85169+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85170 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85171 struct proc_dir_entry *, void *);
85172+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85173+ struct proc_dir_entry *, void *);
85174 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85175 struct proc_dir_entry *);
85176
85177@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85178 return proc_create_data(name, mode, parent, proc_fops, NULL);
85179 }
85180
85181+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85182+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85183+{
85184+#ifdef CONFIG_GRKERNSEC_PROC_USER
85185+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85186+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85187+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85188+#else
85189+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85190+#endif
85191+}
85192+
85193+
85194 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85195 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85196 extern void *PDE_DATA(const struct inode *);
85197@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85198 struct proc_dir_entry *parent,const char *dest) { return NULL;}
85199 static inline struct proc_dir_entry *proc_mkdir(const char *name,
85200 struct proc_dir_entry *parent) {return NULL;}
85201+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
85202+ struct proc_dir_entry *parent) { return NULL; }
85203 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
85204 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85205+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
85206+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85207 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
85208 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
85209 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
85210@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
85211 static inline struct proc_dir_entry *proc_net_mkdir(
85212 struct net *net, const char *name, struct proc_dir_entry *parent)
85213 {
85214- return proc_mkdir_data(name, 0, parent, net);
85215+ return proc_mkdir_data_restrict(name, 0, parent, net);
85216 }
85217
85218 #endif /* _LINUX_PROC_FS_H */
85219diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
85220index 34a1e10..70f6bde 100644
85221--- a/include/linux/proc_ns.h
85222+++ b/include/linux/proc_ns.h
85223@@ -14,7 +14,7 @@ struct proc_ns_operations {
85224 void (*put)(void *ns);
85225 int (*install)(struct nsproxy *nsproxy, void *ns);
85226 unsigned int (*inum)(void *ns);
85227-};
85228+} __do_const __randomize_layout;
85229
85230 struct proc_ns {
85231 void *ns;
85232diff --git a/include/linux/quota.h b/include/linux/quota.h
85233index 80d345a..9e89a9a 100644
85234--- a/include/linux/quota.h
85235+++ b/include/linux/quota.h
85236@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
85237
85238 extern bool qid_eq(struct kqid left, struct kqid right);
85239 extern bool qid_lt(struct kqid left, struct kqid right);
85240-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
85241+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
85242 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
85243 extern bool qid_valid(struct kqid qid);
85244
85245diff --git a/include/linux/random.h b/include/linux/random.h
85246index 57fbbff..2170304 100644
85247--- a/include/linux/random.h
85248+++ b/include/linux/random.h
85249@@ -9,9 +9,19 @@
85250 #include <uapi/linux/random.h>
85251
85252 extern void add_device_randomness(const void *, unsigned int);
85253+
85254+static inline void add_latent_entropy(void)
85255+{
85256+
85257+#ifdef LATENT_ENTROPY_PLUGIN
85258+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
85259+#endif
85260+
85261+}
85262+
85263 extern void add_input_randomness(unsigned int type, unsigned int code,
85264- unsigned int value);
85265-extern void add_interrupt_randomness(int irq, int irq_flags);
85266+ unsigned int value) __latent_entropy;
85267+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
85268
85269 extern void get_random_bytes(void *buf, int nbytes);
85270 extern void get_random_bytes_arch(void *buf, int nbytes);
85271@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
85272 extern const struct file_operations random_fops, urandom_fops;
85273 #endif
85274
85275-unsigned int get_random_int(void);
85276+unsigned int __intentional_overflow(-1) get_random_int(void);
85277 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
85278
85279-u32 prandom_u32(void);
85280+u32 prandom_u32(void) __intentional_overflow(-1);
85281 void prandom_bytes(void *buf, int nbytes);
85282 void prandom_seed(u32 seed);
85283 void prandom_reseed_late(void);
85284@@ -37,6 +47,11 @@ struct rnd_state {
85285 u32 prandom_u32_state(struct rnd_state *state);
85286 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85287
85288+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
85289+{
85290+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
85291+}
85292+
85293 /**
85294 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
85295 * @ep_ro: right open interval endpoint
85296@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85297 *
85298 * Returns: pseudo-random number in interval [0, ep_ro)
85299 */
85300-static inline u32 prandom_u32_max(u32 ep_ro)
85301+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
85302 {
85303 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
85304 }
85305diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
85306index fea49b5..2ac22bb 100644
85307--- a/include/linux/rbtree_augmented.h
85308+++ b/include/linux/rbtree_augmented.h
85309@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
85310 old->rbaugmented = rbcompute(old); \
85311 } \
85312 rbstatic const struct rb_augment_callbacks rbname = { \
85313- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
85314+ .propagate = rbname ## _propagate, \
85315+ .copy = rbname ## _copy, \
85316+ .rotate = rbname ## _rotate \
85317 };
85318
85319
85320diff --git a/include/linux/rculist.h b/include/linux/rculist.h
85321index 372ad5e..d4373f8 100644
85322--- a/include/linux/rculist.h
85323+++ b/include/linux/rculist.h
85324@@ -29,8 +29,8 @@
85325 */
85326 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
85327 {
85328- ACCESS_ONCE(list->next) = list;
85329- ACCESS_ONCE(list->prev) = list;
85330+ ACCESS_ONCE_RW(list->next) = list;
85331+ ACCESS_ONCE_RW(list->prev) = list;
85332 }
85333
85334 /*
85335@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
85336 struct list_head *prev, struct list_head *next);
85337 #endif
85338
85339+void __pax_list_add_rcu(struct list_head *new,
85340+ struct list_head *prev, struct list_head *next);
85341+
85342 /**
85343 * list_add_rcu - add a new entry to rcu-protected list
85344 * @new: new entry to be added
85345@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
85346 __list_add_rcu(new, head, head->next);
85347 }
85348
85349+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
85350+{
85351+ __pax_list_add_rcu(new, head, head->next);
85352+}
85353+
85354 /**
85355 * list_add_tail_rcu - add a new entry to rcu-protected list
85356 * @new: new entry to be added
85357@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
85358 __list_add_rcu(new, head->prev, head);
85359 }
85360
85361+static inline void pax_list_add_tail_rcu(struct list_head *new,
85362+ struct list_head *head)
85363+{
85364+ __pax_list_add_rcu(new, head->prev, head);
85365+}
85366+
85367 /**
85368 * list_del_rcu - deletes entry from list without re-initialization
85369 * @entry: the element to delete from the list.
85370@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
85371 entry->prev = LIST_POISON2;
85372 }
85373
85374+extern void pax_list_del_rcu(struct list_head *entry);
85375+
85376 /**
85377 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
85378 * @n: the element to delete from the hash list.
85379diff --git a/include/linux/reboot.h b/include/linux/reboot.h
85380index 48bf152..d38b785 100644
85381--- a/include/linux/reboot.h
85382+++ b/include/linux/reboot.h
85383@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
85384 */
85385
85386 extern void migrate_to_reboot_cpu(void);
85387-extern void machine_restart(char *cmd);
85388-extern void machine_halt(void);
85389-extern void machine_power_off(void);
85390+extern void machine_restart(char *cmd) __noreturn;
85391+extern void machine_halt(void) __noreturn;
85392+extern void machine_power_off(void) __noreturn;
85393
85394 extern void machine_shutdown(void);
85395 struct pt_regs;
85396@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
85397 */
85398
85399 extern void kernel_restart_prepare(char *cmd);
85400-extern void kernel_restart(char *cmd);
85401-extern void kernel_halt(void);
85402-extern void kernel_power_off(void);
85403+extern void kernel_restart(char *cmd) __noreturn;
85404+extern void kernel_halt(void) __noreturn;
85405+extern void kernel_power_off(void) __noreturn;
85406
85407 extern int C_A_D; /* for sysctl */
85408 void ctrl_alt_del(void);
85409@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
85410 * Emergency restart, callable from an interrupt handler.
85411 */
85412
85413-extern void emergency_restart(void);
85414+extern void emergency_restart(void) __noreturn;
85415 #include <asm/emergency-restart.h>
85416
85417 #endif /* _LINUX_REBOOT_H */
85418diff --git a/include/linux/regset.h b/include/linux/regset.h
85419index 8e0c9fe..ac4d221 100644
85420--- a/include/linux/regset.h
85421+++ b/include/linux/regset.h
85422@@ -161,7 +161,8 @@ struct user_regset {
85423 unsigned int align;
85424 unsigned int bias;
85425 unsigned int core_note_type;
85426-};
85427+} __do_const;
85428+typedef struct user_regset __no_const user_regset_no_const;
85429
85430 /**
85431 * struct user_regset_view - available regsets
85432diff --git a/include/linux/relay.h b/include/linux/relay.h
85433index d7c8359..818daf5 100644
85434--- a/include/linux/relay.h
85435+++ b/include/linux/relay.h
85436@@ -157,7 +157,7 @@ struct rchan_callbacks
85437 * The callback should return 0 if successful, negative if not.
85438 */
85439 int (*remove_buf_file)(struct dentry *dentry);
85440-};
85441+} __no_const;
85442
85443 /*
85444 * CONFIG_RELAY kernel API, kernel/relay.c
85445diff --git a/include/linux/rio.h b/include/linux/rio.h
85446index 6bda06f..bf39a9b 100644
85447--- a/include/linux/rio.h
85448+++ b/include/linux/rio.h
85449@@ -358,7 +358,7 @@ struct rio_ops {
85450 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
85451 u64 rstart, u32 size, u32 flags);
85452 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
85453-};
85454+} __no_const;
85455
85456 #define RIO_RESOURCE_MEM 0x00000100
85457 #define RIO_RESOURCE_DOORBELL 0x00000200
85458diff --git a/include/linux/rmap.h b/include/linux/rmap.h
85459index be57450..31cf65e 100644
85460--- a/include/linux/rmap.h
85461+++ b/include/linux/rmap.h
85462@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
85463 void anon_vma_init(void); /* create anon_vma_cachep */
85464 int anon_vma_prepare(struct vm_area_struct *);
85465 void unlink_anon_vmas(struct vm_area_struct *);
85466-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
85467-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
85468+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
85469+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
85470
85471 static inline void anon_vma_merge(struct vm_area_struct *vma,
85472 struct vm_area_struct *next)
85473diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
85474index ed8f9e7..999bc96 100644
85475--- a/include/linux/scatterlist.h
85476+++ b/include/linux/scatterlist.h
85477@@ -1,6 +1,7 @@
85478 #ifndef _LINUX_SCATTERLIST_H
85479 #define _LINUX_SCATTERLIST_H
85480
85481+#include <linux/sched.h>
85482 #include <linux/string.h>
85483 #include <linux/bug.h>
85484 #include <linux/mm.h>
85485@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
85486 #ifdef CONFIG_DEBUG_SG
85487 BUG_ON(!virt_addr_valid(buf));
85488 #endif
85489+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85490+ if (object_starts_on_stack(buf)) {
85491+ void *adjbuf = buf - current->stack + current->lowmem_stack;
85492+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
85493+ } else
85494+#endif
85495 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
85496 }
85497
85498diff --git a/include/linux/sched.h b/include/linux/sched.h
85499index 2b1d9e9..10ba706 100644
85500--- a/include/linux/sched.h
85501+++ b/include/linux/sched.h
85502@@ -132,6 +132,7 @@ struct fs_struct;
85503 struct perf_event_context;
85504 struct blk_plug;
85505 struct filename;
85506+struct linux_binprm;
85507
85508 #define VMACACHE_BITS 2
85509 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
85510@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
85511 extern int in_sched_functions(unsigned long addr);
85512
85513 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
85514-extern signed long schedule_timeout(signed long timeout);
85515+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
85516 extern signed long schedule_timeout_interruptible(signed long timeout);
85517 extern signed long schedule_timeout_killable(signed long timeout);
85518 extern signed long schedule_timeout_uninterruptible(signed long timeout);
85519@@ -385,6 +386,19 @@ struct nsproxy;
85520 struct user_namespace;
85521
85522 #ifdef CONFIG_MMU
85523+
85524+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
85525+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
85526+#else
85527+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
85528+{
85529+ return 0;
85530+}
85531+#endif
85532+
85533+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
85534+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
85535+
85536 extern void arch_pick_mmap_layout(struct mm_struct *mm);
85537 extern unsigned long
85538 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
85539@@ -682,6 +696,17 @@ struct signal_struct {
85540 #ifdef CONFIG_TASKSTATS
85541 struct taskstats *stats;
85542 #endif
85543+
85544+#ifdef CONFIG_GRKERNSEC
85545+ u32 curr_ip;
85546+ u32 saved_ip;
85547+ u32 gr_saddr;
85548+ u32 gr_daddr;
85549+ u16 gr_sport;
85550+ u16 gr_dport;
85551+ u8 used_accept:1;
85552+#endif
85553+
85554 #ifdef CONFIG_AUDIT
85555 unsigned audit_tty;
85556 unsigned audit_tty_log_passwd;
85557@@ -708,7 +733,7 @@ struct signal_struct {
85558 struct mutex cred_guard_mutex; /* guard against foreign influences on
85559 * credential calculations
85560 * (notably. ptrace) */
85561-};
85562+} __randomize_layout;
85563
85564 /*
85565 * Bits in flags field of signal_struct.
85566@@ -761,6 +786,14 @@ struct user_struct {
85567 struct key *session_keyring; /* UID's default session keyring */
85568 #endif
85569
85570+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
85571+ unsigned char kernel_banned;
85572+#endif
85573+#ifdef CONFIG_GRKERNSEC_BRUTE
85574+ unsigned char suid_banned;
85575+ unsigned long suid_ban_expires;
85576+#endif
85577+
85578 /* Hash table maintenance information */
85579 struct hlist_node uidhash_node;
85580 kuid_t uid;
85581@@ -768,7 +801,7 @@ struct user_struct {
85582 #ifdef CONFIG_PERF_EVENTS
85583 atomic_long_t locked_vm;
85584 #endif
85585-};
85586+} __randomize_layout;
85587
85588 extern int uids_sysfs_init(void);
85589
85590@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
85591 struct task_struct {
85592 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
85593 void *stack;
85594+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85595+ void *lowmem_stack;
85596+#endif
85597 atomic_t usage;
85598 unsigned int flags; /* per process flags, defined below */
85599 unsigned int ptrace;
85600@@ -1345,8 +1381,8 @@ struct task_struct {
85601 struct list_head thread_node;
85602
85603 struct completion *vfork_done; /* for vfork() */
85604- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
85605- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85606+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
85607+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85608
85609 cputime_t utime, stime, utimescaled, stimescaled;
85610 cputime_t gtime;
85611@@ -1371,11 +1407,6 @@ struct task_struct {
85612 struct task_cputime cputime_expires;
85613 struct list_head cpu_timers[3];
85614
85615-/* process credentials */
85616- const struct cred __rcu *real_cred; /* objective and real subjective task
85617- * credentials (COW) */
85618- const struct cred __rcu *cred; /* effective (overridable) subjective task
85619- * credentials (COW) */
85620 char comm[TASK_COMM_LEN]; /* executable name excluding path
85621 - access with [gs]et_task_comm (which lock
85622 it with task_lock())
85623@@ -1393,6 +1424,10 @@ struct task_struct {
85624 #endif
85625 /* CPU-specific state of this task */
85626 struct thread_struct thread;
85627+/* thread_info moved to task_struct */
85628+#ifdef CONFIG_X86
85629+ struct thread_info tinfo;
85630+#endif
85631 /* filesystem information */
85632 struct fs_struct *fs;
85633 /* open file information */
85634@@ -1467,6 +1502,10 @@ struct task_struct {
85635 gfp_t lockdep_reclaim_gfp;
85636 #endif
85637
85638+/* process credentials */
85639+ const struct cred __rcu *real_cred; /* objective and real subjective task
85640+ * credentials (COW) */
85641+
85642 /* journalling filesystem info */
85643 void *journal_info;
85644
85645@@ -1505,6 +1544,10 @@ struct task_struct {
85646 /* cg_list protected by css_set_lock and tsk->alloc_lock */
85647 struct list_head cg_list;
85648 #endif
85649+
85650+ const struct cred __rcu *cred; /* effective (overridable) subjective task
85651+ * credentials (COW) */
85652+
85653 #ifdef CONFIG_FUTEX
85654 struct robust_list_head __user *robust_list;
85655 #ifdef CONFIG_COMPAT
85656@@ -1644,7 +1687,78 @@ struct task_struct {
85657 unsigned int sequential_io;
85658 unsigned int sequential_io_avg;
85659 #endif
85660-};
85661+
85662+#ifdef CONFIG_GRKERNSEC
85663+ /* grsecurity */
85664+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85665+ u64 exec_id;
85666+#endif
85667+#ifdef CONFIG_GRKERNSEC_SETXID
85668+ const struct cred *delayed_cred;
85669+#endif
85670+ struct dentry *gr_chroot_dentry;
85671+ struct acl_subject_label *acl;
85672+ struct acl_subject_label *tmpacl;
85673+ struct acl_role_label *role;
85674+ struct file *exec_file;
85675+ unsigned long brute_expires;
85676+ u16 acl_role_id;
85677+ u8 inherited;
85678+ /* is this the task that authenticated to the special role */
85679+ u8 acl_sp_role;
85680+ u8 is_writable;
85681+ u8 brute;
85682+ u8 gr_is_chrooted;
85683+#endif
85684+
85685+} __randomize_layout;
85686+
85687+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
85688+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
85689+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
85690+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
85691+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
85692+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
85693+
85694+#ifdef CONFIG_PAX_SOFTMODE
85695+extern int pax_softmode;
85696+#endif
85697+
85698+extern int pax_check_flags(unsigned long *);
85699+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
85700+
85701+/* if tsk != current then task_lock must be held on it */
85702+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85703+static inline unsigned long pax_get_flags(struct task_struct *tsk)
85704+{
85705+ if (likely(tsk->mm))
85706+ return tsk->mm->pax_flags;
85707+ else
85708+ return 0UL;
85709+}
85710+
85711+/* if tsk != current then task_lock must be held on it */
85712+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
85713+{
85714+ if (likely(tsk->mm)) {
85715+ tsk->mm->pax_flags = flags;
85716+ return 0;
85717+ }
85718+ return -EINVAL;
85719+}
85720+#endif
85721+
85722+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
85723+extern void pax_set_initial_flags(struct linux_binprm *bprm);
85724+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
85725+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
85726+#endif
85727+
85728+struct path;
85729+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85730+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85731+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85732+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85733
85734 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85735 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85736@@ -1726,7 +1840,7 @@ struct pid_namespace;
85737 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85738 struct pid_namespace *ns);
85739
85740-static inline pid_t task_pid_nr(struct task_struct *tsk)
85741+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85742 {
85743 return tsk->pid;
85744 }
85745@@ -2097,6 +2211,25 @@ extern u64 sched_clock_cpu(int cpu);
85746
85747 extern void sched_clock_init(void);
85748
85749+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85750+static inline void populate_stack(void)
85751+{
85752+ struct task_struct *curtask = current;
85753+ int c;
85754+ int *ptr = curtask->stack;
85755+ int *end = curtask->stack + THREAD_SIZE;
85756+
85757+ while (ptr < end) {
85758+ c = *(volatile int *)ptr;
85759+ ptr += PAGE_SIZE/sizeof(int);
85760+ }
85761+}
85762+#else
85763+static inline void populate_stack(void)
85764+{
85765+}
85766+#endif
85767+
85768 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85769 static inline void sched_clock_tick(void)
85770 {
85771@@ -2230,7 +2363,9 @@ void yield(void);
85772 extern struct exec_domain default_exec_domain;
85773
85774 union thread_union {
85775+#ifndef CONFIG_X86
85776 struct thread_info thread_info;
85777+#endif
85778 unsigned long stack[THREAD_SIZE/sizeof(long)];
85779 };
85780
85781@@ -2263,6 +2398,7 @@ extern struct pid_namespace init_pid_ns;
85782 */
85783
85784 extern struct task_struct *find_task_by_vpid(pid_t nr);
85785+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85786 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85787 struct pid_namespace *ns);
85788
85789@@ -2427,7 +2563,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85790 extern void exit_itimers(struct signal_struct *);
85791 extern void flush_itimer_signals(void);
85792
85793-extern void do_group_exit(int);
85794+extern __noreturn void do_group_exit(int);
85795
85796 extern int do_execve(struct filename *,
85797 const char __user * const __user *,
85798@@ -2642,9 +2778,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85799
85800 #endif
85801
85802-static inline int object_is_on_stack(void *obj)
85803+static inline int object_starts_on_stack(const void *obj)
85804 {
85805- void *stack = task_stack_page(current);
85806+ const void *stack = task_stack_page(current);
85807
85808 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85809 }
85810diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85811index 596a0e0..bea77ec 100644
85812--- a/include/linux/sched/sysctl.h
85813+++ b/include/linux/sched/sysctl.h
85814@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85815 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85816
85817 extern int sysctl_max_map_count;
85818+extern unsigned long sysctl_heap_stack_gap;
85819
85820 extern unsigned int sysctl_sched_latency;
85821 extern unsigned int sysctl_sched_min_granularity;
85822diff --git a/include/linux/security.h b/include/linux/security.h
85823index 623f90e..90b39da 100644
85824--- a/include/linux/security.h
85825+++ b/include/linux/security.h
85826@@ -27,6 +27,7 @@
85827 #include <linux/slab.h>
85828 #include <linux/err.h>
85829 #include <linux/string.h>
85830+#include <linux/grsecurity.h>
85831
85832 struct linux_binprm;
85833 struct cred;
85834@@ -116,8 +117,6 @@ struct seq_file;
85835
85836 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85837
85838-void reset_security_ops(void);
85839-
85840 #ifdef CONFIG_MMU
85841 extern unsigned long mmap_min_addr;
85842 extern unsigned long dac_mmap_min_addr;
85843@@ -1729,7 +1728,7 @@ struct security_operations {
85844 struct audit_context *actx);
85845 void (*audit_rule_free) (void *lsmrule);
85846 #endif /* CONFIG_AUDIT */
85847-};
85848+} __randomize_layout;
85849
85850 /* prototypes */
85851 extern int security_init(void);
85852diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85853index dc368b8..e895209 100644
85854--- a/include/linux/semaphore.h
85855+++ b/include/linux/semaphore.h
85856@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85857 }
85858
85859 extern void down(struct semaphore *sem);
85860-extern int __must_check down_interruptible(struct semaphore *sem);
85861+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85862 extern int __must_check down_killable(struct semaphore *sem);
85863 extern int __must_check down_trylock(struct semaphore *sem);
85864 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85865diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85866index 52e0097..383f21d 100644
85867--- a/include/linux/seq_file.h
85868+++ b/include/linux/seq_file.h
85869@@ -27,6 +27,9 @@ struct seq_file {
85870 struct mutex lock;
85871 const struct seq_operations *op;
85872 int poll_event;
85873+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85874+ u64 exec_id;
85875+#endif
85876 #ifdef CONFIG_USER_NS
85877 struct user_namespace *user_ns;
85878 #endif
85879@@ -39,6 +42,7 @@ struct seq_operations {
85880 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85881 int (*show) (struct seq_file *m, void *v);
85882 };
85883+typedef struct seq_operations __no_const seq_operations_no_const;
85884
85885 #define SEQ_SKIP 1
85886
85887@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
85888
85889 char *mangle_path(char *s, const char *p, const char *esc);
85890 int seq_open(struct file *, const struct seq_operations *);
85891+int seq_open_restrict(struct file *, const struct seq_operations *);
85892 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85893 loff_t seq_lseek(struct file *, loff_t, int);
85894 int seq_release(struct inode *, struct file *);
85895@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85896 }
85897
85898 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85899+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85900 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85901 int single_release(struct inode *, struct file *);
85902 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85903diff --git a/include/linux/shm.h b/include/linux/shm.h
85904index 6fb8016..ab4465e 100644
85905--- a/include/linux/shm.h
85906+++ b/include/linux/shm.h
85907@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85908 /* The task created the shm object. NULL if the task is dead. */
85909 struct task_struct *shm_creator;
85910 struct list_head shm_clist; /* list by creator */
85911+#ifdef CONFIG_GRKERNSEC
85912+ u64 shm_createtime;
85913+ pid_t shm_lapid;
85914+#endif
85915 };
85916
85917 /* shm_mode upper byte flags */
85918diff --git a/include/linux/signal.h b/include/linux/signal.h
85919index 750196f..ae7a3a4 100644
85920--- a/include/linux/signal.h
85921+++ b/include/linux/signal.h
85922@@ -292,7 +292,7 @@ static inline void allow_signal(int sig)
85923 * know it'll be handled, so that they don't get converted to
85924 * SIGKILL or just silently dropped.
85925 */
85926- kernel_sigaction(sig, (__force __sighandler_t)2);
85927+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85928 }
85929
85930 static inline void disallow_signal(int sig)
85931diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85932index abde271..bc9ece1 100644
85933--- a/include/linux/skbuff.h
85934+++ b/include/linux/skbuff.h
85935@@ -728,7 +728,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85936 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85937 int node);
85938 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85939-static inline struct sk_buff *alloc_skb(unsigned int size,
85940+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85941 gfp_t priority)
85942 {
85943 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85944@@ -1845,7 +1845,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85945 return skb->inner_transport_header - skb->inner_network_header;
85946 }
85947
85948-static inline int skb_network_offset(const struct sk_buff *skb)
85949+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85950 {
85951 return skb_network_header(skb) - skb->data;
85952 }
85953@@ -1917,7 +1917,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
85954 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85955 */
85956 #ifndef NET_SKB_PAD
85957-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85958+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85959 #endif
85960
85961 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85962@@ -2524,7 +2524,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85963 int *err);
85964 unsigned int datagram_poll(struct file *file, struct socket *sock,
85965 struct poll_table_struct *wait);
85966-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85967+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85968 struct iovec *to, int size);
85969 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
85970 struct iovec *iov);
85971@@ -2918,6 +2918,9 @@ static inline void nf_reset(struct sk_buff *skb)
85972 nf_bridge_put(skb->nf_bridge);
85973 skb->nf_bridge = NULL;
85974 #endif
85975+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85976+ skb->nf_trace = 0;
85977+#endif
85978 }
85979
85980 static inline void nf_reset_trace(struct sk_buff *skb)
85981diff --git a/include/linux/slab.h b/include/linux/slab.h
85982index 1d9abb7..b1e8b10 100644
85983--- a/include/linux/slab.h
85984+++ b/include/linux/slab.h
85985@@ -14,15 +14,29 @@
85986 #include <linux/gfp.h>
85987 #include <linux/types.h>
85988 #include <linux/workqueue.h>
85989-
85990+#include <linux/err.h>
85991
85992 /*
85993 * Flags to pass to kmem_cache_create().
85994 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85995 */
85996 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85997+
85998+#ifdef CONFIG_PAX_USERCOPY_SLABS
85999+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86000+#else
86001+#define SLAB_USERCOPY 0x00000000UL
86002+#endif
86003+
86004 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86005 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86006+
86007+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86008+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86009+#else
86010+#define SLAB_NO_SANITIZE 0x00000000UL
86011+#endif
86012+
86013 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86014 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86015 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86016@@ -98,10 +112,13 @@
86017 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86018 * Both make kfree a no-op.
86019 */
86020-#define ZERO_SIZE_PTR ((void *)16)
86021+#define ZERO_SIZE_PTR \
86022+({ \
86023+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86024+ (void *)(-MAX_ERRNO-1L); \
86025+})
86026
86027-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86028- (unsigned long)ZERO_SIZE_PTR)
86029+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86030
86031 #include <linux/kmemleak.h>
86032
86033@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86034 void kfree(const void *);
86035 void kzfree(const void *);
86036 size_t ksize(const void *);
86037+const char *check_heap_object(const void *ptr, unsigned long n);
86038+bool is_usercopy_object(const void *ptr);
86039
86040 /*
86041 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86042@@ -176,7 +195,7 @@ struct kmem_cache {
86043 unsigned int align; /* Alignment as calculated */
86044 unsigned long flags; /* Active flags on the slab */
86045 const char *name; /* Slab name for sysfs */
86046- int refcount; /* Use counter */
86047+ atomic_t refcount; /* Use counter */
86048 void (*ctor)(void *); /* Called on object slot creation */
86049 struct list_head list; /* List of all slab caches on the system */
86050 };
86051@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86052 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86053 #endif
86054
86055+#ifdef CONFIG_PAX_USERCOPY_SLABS
86056+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86057+#endif
86058+
86059 /*
86060 * Figure out which kmalloc slab an allocation of a certain size
86061 * belongs to.
86062@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86063 * 2 = 120 .. 192 bytes
86064 * n = 2^(n-1) .. 2^n -1
86065 */
86066-static __always_inline int kmalloc_index(size_t size)
86067+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86068 {
86069 if (!size)
86070 return 0;
86071@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
86072 }
86073 #endif /* !CONFIG_SLOB */
86074
86075-void *__kmalloc(size_t size, gfp_t flags);
86076+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
86077 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86078
86079 #ifdef CONFIG_NUMA
86080-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86081+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
86082 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86083 #else
86084 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
86085diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86086index 8235dfb..47ce586 100644
86087--- a/include/linux/slab_def.h
86088+++ b/include/linux/slab_def.h
86089@@ -38,7 +38,7 @@ struct kmem_cache {
86090 /* 4) cache creation/removal */
86091 const char *name;
86092 struct list_head list;
86093- int refcount;
86094+ atomic_t refcount;
86095 int object_size;
86096 int align;
86097
86098@@ -54,10 +54,14 @@ struct kmem_cache {
86099 unsigned long node_allocs;
86100 unsigned long node_frees;
86101 unsigned long node_overflow;
86102- atomic_t allochit;
86103- atomic_t allocmiss;
86104- atomic_t freehit;
86105- atomic_t freemiss;
86106+ atomic_unchecked_t allochit;
86107+ atomic_unchecked_t allocmiss;
86108+ atomic_unchecked_t freehit;
86109+ atomic_unchecked_t freemiss;
86110+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86111+ atomic_unchecked_t sanitized;
86112+ atomic_unchecked_t not_sanitized;
86113+#endif
86114
86115 /*
86116 * If debugging is enabled, then the allocator can add additional
86117diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86118index d82abd4..408c3a0 100644
86119--- a/include/linux/slub_def.h
86120+++ b/include/linux/slub_def.h
86121@@ -74,7 +74,7 @@ struct kmem_cache {
86122 struct kmem_cache_order_objects max;
86123 struct kmem_cache_order_objects min;
86124 gfp_t allocflags; /* gfp flags to use on each alloc */
86125- int refcount; /* Refcount for slab cache destroy */
86126+ atomic_t refcount; /* Refcount for slab cache destroy */
86127 void (*ctor)(void *);
86128 int inuse; /* Offset to metadata */
86129 int align; /* Alignment */
86130diff --git a/include/linux/smp.h b/include/linux/smp.h
86131index 34347f2..8739978 100644
86132--- a/include/linux/smp.h
86133+++ b/include/linux/smp.h
86134@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
86135 #endif
86136
86137 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86138+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86139 #define put_cpu() preempt_enable()
86140+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86141
86142 /*
86143 * Callback to arch code if there's nosmp or maxcpus=0 on the
86144diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
86145index 46cca4c..3323536 100644
86146--- a/include/linux/sock_diag.h
86147+++ b/include/linux/sock_diag.h
86148@@ -11,7 +11,7 @@ struct sock;
86149 struct sock_diag_handler {
86150 __u8 family;
86151 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
86152-};
86153+} __do_const;
86154
86155 int sock_diag_register(const struct sock_diag_handler *h);
86156 void sock_diag_unregister(const struct sock_diag_handler *h);
86157diff --git a/include/linux/sonet.h b/include/linux/sonet.h
86158index 680f9a3..f13aeb0 100644
86159--- a/include/linux/sonet.h
86160+++ b/include/linux/sonet.h
86161@@ -7,7 +7,7 @@
86162 #include <uapi/linux/sonet.h>
86163
86164 struct k_sonet_stats {
86165-#define __HANDLE_ITEM(i) atomic_t i
86166+#define __HANDLE_ITEM(i) atomic_unchecked_t i
86167 __SONET_ITEMS
86168 #undef __HANDLE_ITEM
86169 };
86170diff --git a/include/linux/string.h b/include/linux/string.h
86171index d36977e..3b42b37 100644
86172--- a/include/linux/string.h
86173+++ b/include/linux/string.h
86174@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
86175 #endif
86176
86177 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
86178- const void *from, size_t available);
86179+ const void *from, size_t available);
86180
86181 /**
86182 * strstarts - does @str start with @prefix?
86183@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
86184 return strncmp(str, prefix, strlen(prefix)) == 0;
86185 }
86186
86187-extern size_t memweight(const void *ptr, size_t bytes);
86188+size_t memweight(const void *ptr, size_t bytes);
86189+void memzero_explicit(void *s, size_t count);
86190
86191 /**
86192 * kbasename - return the last part of a pathname.
86193diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
86194index 07d8e53..dc934c9 100644
86195--- a/include/linux/sunrpc/addr.h
86196+++ b/include/linux/sunrpc/addr.h
86197@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
86198 {
86199 switch (sap->sa_family) {
86200 case AF_INET:
86201- return ntohs(((struct sockaddr_in *)sap)->sin_port);
86202+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
86203 case AF_INET6:
86204- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
86205+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
86206 }
86207 return 0;
86208 }
86209@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
86210 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
86211 const struct sockaddr *src)
86212 {
86213- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
86214+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
86215 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
86216
86217 dsin->sin_family = ssin->sin_family;
86218@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
86219 if (sa->sa_family != AF_INET6)
86220 return 0;
86221
86222- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
86223+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
86224 }
86225
86226 #endif /* _LINUX_SUNRPC_ADDR_H */
86227diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
86228index 70736b9..37f33db 100644
86229--- a/include/linux/sunrpc/clnt.h
86230+++ b/include/linux/sunrpc/clnt.h
86231@@ -97,7 +97,7 @@ struct rpc_procinfo {
86232 unsigned int p_timer; /* Which RTT timer to use */
86233 u32 p_statidx; /* Which procedure to account */
86234 const char * p_name; /* name of procedure */
86235-};
86236+} __do_const;
86237
86238 #ifdef __KERNEL__
86239
86240diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
86241index cf61ecd..a4a9bc0 100644
86242--- a/include/linux/sunrpc/svc.h
86243+++ b/include/linux/sunrpc/svc.h
86244@@ -417,7 +417,7 @@ struct svc_procedure {
86245 unsigned int pc_count; /* call count */
86246 unsigned int pc_cachetype; /* cache info (NFS) */
86247 unsigned int pc_xdrressize; /* maximum size of XDR reply */
86248-};
86249+} __do_const;
86250
86251 /*
86252 * Function prototypes.
86253diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
86254index 975da75..318c083 100644
86255--- a/include/linux/sunrpc/svc_rdma.h
86256+++ b/include/linux/sunrpc/svc_rdma.h
86257@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
86258 extern unsigned int svcrdma_max_requests;
86259 extern unsigned int svcrdma_max_req_size;
86260
86261-extern atomic_t rdma_stat_recv;
86262-extern atomic_t rdma_stat_read;
86263-extern atomic_t rdma_stat_write;
86264-extern atomic_t rdma_stat_sq_starve;
86265-extern atomic_t rdma_stat_rq_starve;
86266-extern atomic_t rdma_stat_rq_poll;
86267-extern atomic_t rdma_stat_rq_prod;
86268-extern atomic_t rdma_stat_sq_poll;
86269-extern atomic_t rdma_stat_sq_prod;
86270+extern atomic_unchecked_t rdma_stat_recv;
86271+extern atomic_unchecked_t rdma_stat_read;
86272+extern atomic_unchecked_t rdma_stat_write;
86273+extern atomic_unchecked_t rdma_stat_sq_starve;
86274+extern atomic_unchecked_t rdma_stat_rq_starve;
86275+extern atomic_unchecked_t rdma_stat_rq_poll;
86276+extern atomic_unchecked_t rdma_stat_rq_prod;
86277+extern atomic_unchecked_t rdma_stat_sq_poll;
86278+extern atomic_unchecked_t rdma_stat_sq_prod;
86279
86280 #define RPCRDMA_VERSION 1
86281
86282diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
86283index 8d71d65..f79586e 100644
86284--- a/include/linux/sunrpc/svcauth.h
86285+++ b/include/linux/sunrpc/svcauth.h
86286@@ -120,7 +120,7 @@ struct auth_ops {
86287 int (*release)(struct svc_rqst *rq);
86288 void (*domain_release)(struct auth_domain *);
86289 int (*set_client)(struct svc_rqst *rq);
86290-};
86291+} __do_const;
86292
86293 #define SVC_GARBAGE 1
86294 #define SVC_SYSERR 2
86295diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
86296index e7a018e..49f8b17 100644
86297--- a/include/linux/swiotlb.h
86298+++ b/include/linux/swiotlb.h
86299@@ -60,7 +60,8 @@ extern void
86300
86301 extern void
86302 swiotlb_free_coherent(struct device *hwdev, size_t size,
86303- void *vaddr, dma_addr_t dma_handle);
86304+ void *vaddr, dma_addr_t dma_handle,
86305+ struct dma_attrs *attrs);
86306
86307 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
86308 unsigned long offset, size_t size,
86309diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
86310index 0f86d85..dff3419 100644
86311--- a/include/linux/syscalls.h
86312+++ b/include/linux/syscalls.h
86313@@ -98,10 +98,16 @@ struct sigaltstack;
86314 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
86315
86316 #define __SC_DECL(t, a) t a
86317+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
86318 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
86319 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
86320 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
86321-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
86322+#define __SC_LONG(t, a) __typeof( \
86323+ __builtin_choose_expr( \
86324+ sizeof(t) > sizeof(int), \
86325+ (t) 0, \
86326+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
86327+ )) a
86328 #define __SC_CAST(t, a) (t) a
86329 #define __SC_ARGS(t, a) a
86330 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
86331@@ -383,11 +389,11 @@ asmlinkage long sys_sync(void);
86332 asmlinkage long sys_fsync(unsigned int fd);
86333 asmlinkage long sys_fdatasync(unsigned int fd);
86334 asmlinkage long sys_bdflush(int func, long data);
86335-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
86336- char __user *type, unsigned long flags,
86337+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
86338+ const char __user *type, unsigned long flags,
86339 void __user *data);
86340-asmlinkage long sys_umount(char __user *name, int flags);
86341-asmlinkage long sys_oldumount(char __user *name);
86342+asmlinkage long sys_umount(const char __user *name, int flags);
86343+asmlinkage long sys_oldumount(const char __user *name);
86344 asmlinkage long sys_truncate(const char __user *path, long length);
86345 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
86346 asmlinkage long sys_stat(const char __user *filename,
86347@@ -599,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
86348 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
86349 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
86350 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
86351- struct sockaddr __user *, int);
86352+ struct sockaddr __user *, int) __intentional_overflow(0);
86353 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
86354 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
86355 unsigned int vlen, unsigned flags);
86356diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
86357index 27b3b0b..e093dd9 100644
86358--- a/include/linux/syscore_ops.h
86359+++ b/include/linux/syscore_ops.h
86360@@ -16,7 +16,7 @@ struct syscore_ops {
86361 int (*suspend)(void);
86362 void (*resume)(void);
86363 void (*shutdown)(void);
86364-};
86365+} __do_const;
86366
86367 extern void register_syscore_ops(struct syscore_ops *ops);
86368 extern void unregister_syscore_ops(struct syscore_ops *ops);
86369diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
86370index b7361f8..341a15a 100644
86371--- a/include/linux/sysctl.h
86372+++ b/include/linux/sysctl.h
86373@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
86374
86375 extern int proc_dostring(struct ctl_table *, int,
86376 void __user *, size_t *, loff_t *);
86377+extern int proc_dostring_modpriv(struct ctl_table *, int,
86378+ void __user *, size_t *, loff_t *);
86379 extern int proc_dointvec(struct ctl_table *, int,
86380 void __user *, size_t *, loff_t *);
86381 extern int proc_dointvec_minmax(struct ctl_table *, int,
86382@@ -113,7 +115,8 @@ struct ctl_table
86383 struct ctl_table_poll *poll;
86384 void *extra1;
86385 void *extra2;
86386-};
86387+} __do_const __randomize_layout;
86388+typedef struct ctl_table __no_const ctl_table_no_const;
86389
86390 struct ctl_node {
86391 struct rb_node node;
86392diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
86393index f97d0db..c1187dc 100644
86394--- a/include/linux/sysfs.h
86395+++ b/include/linux/sysfs.h
86396@@ -34,7 +34,8 @@ struct attribute {
86397 struct lock_class_key *key;
86398 struct lock_class_key skey;
86399 #endif
86400-};
86401+} __do_const;
86402+typedef struct attribute __no_const attribute_no_const;
86403
86404 /**
86405 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
86406@@ -63,7 +64,8 @@ struct attribute_group {
86407 struct attribute *, int);
86408 struct attribute **attrs;
86409 struct bin_attribute **bin_attrs;
86410-};
86411+} __do_const;
86412+typedef struct attribute_group __no_const attribute_group_no_const;
86413
86414 /**
86415 * Use these macros to make defining attributes easier. See include/linux/device.h
86416@@ -128,7 +130,8 @@ struct bin_attribute {
86417 char *, loff_t, size_t);
86418 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
86419 struct vm_area_struct *vma);
86420-};
86421+} __do_const;
86422+typedef struct bin_attribute __no_const bin_attribute_no_const;
86423
86424 /**
86425 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
86426diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
86427index 387fa7d..3fcde6b 100644
86428--- a/include/linux/sysrq.h
86429+++ b/include/linux/sysrq.h
86430@@ -16,6 +16,7 @@
86431
86432 #include <linux/errno.h>
86433 #include <linux/types.h>
86434+#include <linux/compiler.h>
86435
86436 /* Possible values of bitmask for enabling sysrq functions */
86437 /* 0x0001 is reserved for enable everything */
86438@@ -33,7 +34,7 @@ struct sysrq_key_op {
86439 char *help_msg;
86440 char *action_msg;
86441 int enable_mask;
86442-};
86443+} __do_const;
86444
86445 #ifdef CONFIG_MAGIC_SYSRQ
86446
86447diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
86448index ff307b5..f1a4468 100644
86449--- a/include/linux/thread_info.h
86450+++ b/include/linux/thread_info.h
86451@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
86452 #error "no set_restore_sigmask() provided and default one won't work"
86453 #endif
86454
86455+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
86456+
86457+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
86458+{
86459+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
86460+}
86461+
86462 #endif /* __KERNEL__ */
86463
86464 #endif /* _LINUX_THREAD_INFO_H */
86465diff --git a/include/linux/tty.h b/include/linux/tty.h
86466index 8413294..44391c7 100644
86467--- a/include/linux/tty.h
86468+++ b/include/linux/tty.h
86469@@ -202,7 +202,7 @@ struct tty_port {
86470 const struct tty_port_operations *ops; /* Port operations */
86471 spinlock_t lock; /* Lock protecting tty field */
86472 int blocked_open; /* Waiting to open */
86473- int count; /* Usage count */
86474+ atomic_t count; /* Usage count */
86475 wait_queue_head_t open_wait; /* Open waiters */
86476 wait_queue_head_t close_wait; /* Close waiters */
86477 wait_queue_head_t delta_msr_wait; /* Modem status change */
86478@@ -284,7 +284,7 @@ struct tty_struct {
86479 /* If the tty has a pending do_SAK, queue it here - akpm */
86480 struct work_struct SAK_work;
86481 struct tty_port *port;
86482-};
86483+} __randomize_layout;
86484
86485 /* Each of a tty's open files has private_data pointing to tty_file_private */
86486 struct tty_file_private {
86487@@ -548,7 +548,7 @@ extern int tty_port_open(struct tty_port *port,
86488 struct tty_struct *tty, struct file *filp);
86489 static inline int tty_port_users(struct tty_port *port)
86490 {
86491- return port->count + port->blocked_open;
86492+ return atomic_read(&port->count) + port->blocked_open;
86493 }
86494
86495 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
86496diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
86497index e48c608..6a19af2 100644
86498--- a/include/linux/tty_driver.h
86499+++ b/include/linux/tty_driver.h
86500@@ -287,7 +287,7 @@ struct tty_operations {
86501 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
86502 #endif
86503 const struct file_operations *proc_fops;
86504-};
86505+} __do_const __randomize_layout;
86506
86507 struct tty_driver {
86508 int magic; /* magic number for this structure */
86509@@ -321,7 +321,7 @@ struct tty_driver {
86510
86511 const struct tty_operations *ops;
86512 struct list_head tty_drivers;
86513-};
86514+} __randomize_layout;
86515
86516 extern struct list_head tty_drivers;
86517
86518diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
86519index 00c9d68..bc0188b 100644
86520--- a/include/linux/tty_ldisc.h
86521+++ b/include/linux/tty_ldisc.h
86522@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
86523
86524 struct module *owner;
86525
86526- int refcount;
86527+ atomic_t refcount;
86528 };
86529
86530 struct tty_ldisc {
86531diff --git a/include/linux/types.h b/include/linux/types.h
86532index a0bb704..f511c77 100644
86533--- a/include/linux/types.h
86534+++ b/include/linux/types.h
86535@@ -177,10 +177,26 @@ typedef struct {
86536 int counter;
86537 } atomic_t;
86538
86539+#ifdef CONFIG_PAX_REFCOUNT
86540+typedef struct {
86541+ int counter;
86542+} atomic_unchecked_t;
86543+#else
86544+typedef atomic_t atomic_unchecked_t;
86545+#endif
86546+
86547 #ifdef CONFIG_64BIT
86548 typedef struct {
86549 long counter;
86550 } atomic64_t;
86551+
86552+#ifdef CONFIG_PAX_REFCOUNT
86553+typedef struct {
86554+ long counter;
86555+} atomic64_unchecked_t;
86556+#else
86557+typedef atomic64_t atomic64_unchecked_t;
86558+#endif
86559 #endif
86560
86561 struct list_head {
86562diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
86563index ecd3319..8a36ded 100644
86564--- a/include/linux/uaccess.h
86565+++ b/include/linux/uaccess.h
86566@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
86567 long ret; \
86568 mm_segment_t old_fs = get_fs(); \
86569 \
86570- set_fs(KERNEL_DS); \
86571 pagefault_disable(); \
86572- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
86573- pagefault_enable(); \
86574+ set_fs(KERNEL_DS); \
86575+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
86576 set_fs(old_fs); \
86577+ pagefault_enable(); \
86578 ret; \
86579 })
86580
86581diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
86582index 2d1f9b6..d7a9fce 100644
86583--- a/include/linux/uidgid.h
86584+++ b/include/linux/uidgid.h
86585@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
86586
86587 #endif /* CONFIG_USER_NS */
86588
86589+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
86590+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
86591+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
86592+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
86593+
86594 #endif /* _LINUX_UIDGID_H */
86595diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
86596index 99c1b4d..562e6f3 100644
86597--- a/include/linux/unaligned/access_ok.h
86598+++ b/include/linux/unaligned/access_ok.h
86599@@ -4,34 +4,34 @@
86600 #include <linux/kernel.h>
86601 #include <asm/byteorder.h>
86602
86603-static inline u16 get_unaligned_le16(const void *p)
86604+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
86605 {
86606- return le16_to_cpup((__le16 *)p);
86607+ return le16_to_cpup((const __le16 *)p);
86608 }
86609
86610-static inline u32 get_unaligned_le32(const void *p)
86611+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
86612 {
86613- return le32_to_cpup((__le32 *)p);
86614+ return le32_to_cpup((const __le32 *)p);
86615 }
86616
86617-static inline u64 get_unaligned_le64(const void *p)
86618+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
86619 {
86620- return le64_to_cpup((__le64 *)p);
86621+ return le64_to_cpup((const __le64 *)p);
86622 }
86623
86624-static inline u16 get_unaligned_be16(const void *p)
86625+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
86626 {
86627- return be16_to_cpup((__be16 *)p);
86628+ return be16_to_cpup((const __be16 *)p);
86629 }
86630
86631-static inline u32 get_unaligned_be32(const void *p)
86632+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
86633 {
86634- return be32_to_cpup((__be32 *)p);
86635+ return be32_to_cpup((const __be32 *)p);
86636 }
86637
86638-static inline u64 get_unaligned_be64(const void *p)
86639+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
86640 {
86641- return be64_to_cpup((__be64 *)p);
86642+ return be64_to_cpup((const __be64 *)p);
86643 }
86644
86645 static inline void put_unaligned_le16(u16 val, void *p)
86646diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
86647index 4f844c6..60beb5d 100644
86648--- a/include/linux/uprobes.h
86649+++ b/include/linux/uprobes.h
86650@@ -98,11 +98,11 @@ struct uprobes_state {
86651 struct xol_area *xol_area;
86652 };
86653
86654-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86655-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86656-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
86657-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
86658-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
86659+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86660+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86661+extern bool is_swbp_insn(uprobe_opcode_t *insn);
86662+extern bool is_trap_insn(uprobe_opcode_t *insn);
86663+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
86664 extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
86665 extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
86666 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
86667@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
86668 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
86669 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
86670 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
86671-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
86672-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
86673+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
86674+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
86675 void *src, unsigned long len);
86676 #else /* !CONFIG_UPROBES */
86677 struct uprobes_state {
86678diff --git a/include/linux/usb.h b/include/linux/usb.h
86679index d2465bc..5256de4 100644
86680--- a/include/linux/usb.h
86681+++ b/include/linux/usb.h
86682@@ -571,7 +571,7 @@ struct usb_device {
86683 int maxchild;
86684
86685 u32 quirks;
86686- atomic_t urbnum;
86687+ atomic_unchecked_t urbnum;
86688
86689 unsigned long active_duration;
86690
86691@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
86692
86693 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
86694 __u8 request, __u8 requesttype, __u16 value, __u16 index,
86695- void *data, __u16 size, int timeout);
86696+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
86697 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
86698 void *data, int len, int *actual_length, int timeout);
86699 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
86700diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
86701index d5952bb..9a626d4 100644
86702--- a/include/linux/usb/renesas_usbhs.h
86703+++ b/include/linux/usb/renesas_usbhs.h
86704@@ -39,7 +39,7 @@ enum {
86705 */
86706 struct renesas_usbhs_driver_callback {
86707 int (*notify_hotplug)(struct platform_device *pdev);
86708-};
86709+} __no_const;
86710
86711 /*
86712 * callback functions for platform
86713diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
86714index e953726..8edb26a 100644
86715--- a/include/linux/user_namespace.h
86716+++ b/include/linux/user_namespace.h
86717@@ -33,7 +33,7 @@ struct user_namespace {
86718 struct key *persistent_keyring_register;
86719 struct rw_semaphore persistent_keyring_register_sem;
86720 #endif
86721-};
86722+} __randomize_layout;
86723
86724 extern struct user_namespace init_user_ns;
86725
86726diff --git a/include/linux/utsname.h b/include/linux/utsname.h
86727index 239e277..22a5cf5 100644
86728--- a/include/linux/utsname.h
86729+++ b/include/linux/utsname.h
86730@@ -24,7 +24,7 @@ struct uts_namespace {
86731 struct new_utsname name;
86732 struct user_namespace *user_ns;
86733 unsigned int proc_inum;
86734-};
86735+} __randomize_layout;
86736 extern struct uts_namespace init_uts_ns;
86737
86738 #ifdef CONFIG_UTS_NS
86739diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
86740index 6f8fbcf..4efc177 100644
86741--- a/include/linux/vermagic.h
86742+++ b/include/linux/vermagic.h
86743@@ -25,9 +25,42 @@
86744 #define MODULE_ARCH_VERMAGIC ""
86745 #endif
86746
86747+#ifdef CONFIG_PAX_REFCOUNT
86748+#define MODULE_PAX_REFCOUNT "REFCOUNT "
86749+#else
86750+#define MODULE_PAX_REFCOUNT ""
86751+#endif
86752+
86753+#ifdef CONSTIFY_PLUGIN
86754+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
86755+#else
86756+#define MODULE_CONSTIFY_PLUGIN ""
86757+#endif
86758+
86759+#ifdef STACKLEAK_PLUGIN
86760+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86761+#else
86762+#define MODULE_STACKLEAK_PLUGIN ""
86763+#endif
86764+
86765+#ifdef RANDSTRUCT_PLUGIN
86766+#include <generated/randomize_layout_hash.h>
86767+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86768+#else
86769+#define MODULE_RANDSTRUCT_PLUGIN
86770+#endif
86771+
86772+#ifdef CONFIG_GRKERNSEC
86773+#define MODULE_GRSEC "GRSEC "
86774+#else
86775+#define MODULE_GRSEC ""
86776+#endif
86777+
86778 #define VERMAGIC_STRING \
86779 UTS_RELEASE " " \
86780 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86781 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86782- MODULE_ARCH_VERMAGIC
86783+ MODULE_ARCH_VERMAGIC \
86784+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86785+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86786
86787diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86788index b483abd..af305ad 100644
86789--- a/include/linux/vga_switcheroo.h
86790+++ b/include/linux/vga_switcheroo.h
86791@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86792
86793 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86794
86795-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86796+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86797 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86798-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86799+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86800 #else
86801
86802 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86803@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86804
86805 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86806
86807-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86808+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86809 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86810-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86811+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86812
86813 #endif
86814 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86815diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86816index b87696f..1d11de7 100644
86817--- a/include/linux/vmalloc.h
86818+++ b/include/linux/vmalloc.h
86819@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86820 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86821 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86822 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86823+
86824+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86825+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86826+#endif
86827+
86828 /* bits [20..32] reserved for arch specific ioremap internals */
86829
86830 /*
86831@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86832 unsigned long flags, pgprot_t prot);
86833 extern void vunmap(const void *addr);
86834
86835+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86836+extern void unmap_process_stacks(struct task_struct *task);
86837+#endif
86838+
86839 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86840 unsigned long uaddr, void *kaddr,
86841 unsigned long size);
86842@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86843
86844 /* for /dev/kmem */
86845 extern long vread(char *buf, char *addr, unsigned long count);
86846-extern long vwrite(char *buf, char *addr, unsigned long count);
86847+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86848
86849 /*
86850 * Internals. Dont't use..
86851diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86852index 82e7db7..f8ce3d0 100644
86853--- a/include/linux/vmstat.h
86854+++ b/include/linux/vmstat.h
86855@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86856 /*
86857 * Zone based page accounting with per cpu differentials.
86858 */
86859-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86860+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86861
86862 static inline void zone_page_state_add(long x, struct zone *zone,
86863 enum zone_stat_item item)
86864 {
86865- atomic_long_add(x, &zone->vm_stat[item]);
86866- atomic_long_add(x, &vm_stat[item]);
86867+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86868+ atomic_long_add_unchecked(x, &vm_stat[item]);
86869 }
86870
86871-static inline unsigned long global_page_state(enum zone_stat_item item)
86872+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86873 {
86874- long x = atomic_long_read(&vm_stat[item]);
86875+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86876 #ifdef CONFIG_SMP
86877 if (x < 0)
86878 x = 0;
86879@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86880 return x;
86881 }
86882
86883-static inline unsigned long zone_page_state(struct zone *zone,
86884+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86885 enum zone_stat_item item)
86886 {
86887- long x = atomic_long_read(&zone->vm_stat[item]);
86888+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86889 #ifdef CONFIG_SMP
86890 if (x < 0)
86891 x = 0;
86892@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86893 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86894 enum zone_stat_item item)
86895 {
86896- long x = atomic_long_read(&zone->vm_stat[item]);
86897+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86898
86899 #ifdef CONFIG_SMP
86900 int cpu;
86901@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86902
86903 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86904 {
86905- atomic_long_inc(&zone->vm_stat[item]);
86906- atomic_long_inc(&vm_stat[item]);
86907+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86908+ atomic_long_inc_unchecked(&vm_stat[item]);
86909 }
86910
86911 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86912 {
86913- atomic_long_dec(&zone->vm_stat[item]);
86914- atomic_long_dec(&vm_stat[item]);
86915+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86916+ atomic_long_dec_unchecked(&vm_stat[item]);
86917 }
86918
86919 static inline void __inc_zone_page_state(struct page *page,
86920diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86921index 91b0a68..0e9adf6 100644
86922--- a/include/linux/xattr.h
86923+++ b/include/linux/xattr.h
86924@@ -28,7 +28,7 @@ struct xattr_handler {
86925 size_t size, int handler_flags);
86926 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86927 size_t size, int flags, int handler_flags);
86928-};
86929+} __do_const;
86930
86931 struct xattr {
86932 const char *name;
86933@@ -37,6 +37,9 @@ struct xattr {
86934 };
86935
86936 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86937+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86938+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86939+#endif
86940 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86941 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86942 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86943diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86944index 92dbbd3..13ab0b3 100644
86945--- a/include/linux/zlib.h
86946+++ b/include/linux/zlib.h
86947@@ -31,6 +31,7 @@
86948 #define _ZLIB_H
86949
86950 #include <linux/zconf.h>
86951+#include <linux/compiler.h>
86952
86953 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86954 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86955@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86956
86957 /* basic functions */
86958
86959-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86960+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86961 /*
86962 Returns the number of bytes that needs to be allocated for a per-
86963 stream workspace with the specified parameters. A pointer to this
86964diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86965index eb76cfd..9fd0e7c 100644
86966--- a/include/media/v4l2-dev.h
86967+++ b/include/media/v4l2-dev.h
86968@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86969 int (*mmap) (struct file *, struct vm_area_struct *);
86970 int (*open) (struct file *);
86971 int (*release) (struct file *);
86972-};
86973+} __do_const;
86974
86975 /*
86976 * Newer version of video_device, handled by videodev2.c
86977diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86978index ffb69da..040393e 100644
86979--- a/include/media/v4l2-device.h
86980+++ b/include/media/v4l2-device.h
86981@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86982 this function returns 0. If the name ends with a digit (e.g. cx18),
86983 then the name will be set to cx18-0 since cx180 looks really odd. */
86984 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86985- atomic_t *instance);
86986+ atomic_unchecked_t *instance);
86987
86988 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86989 Since the parent disappears this ensures that v4l2_dev doesn't have an
86990diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86991index d9fa68f..45c88d1 100644
86992--- a/include/net/9p/transport.h
86993+++ b/include/net/9p/transport.h
86994@@ -63,7 +63,7 @@ struct p9_trans_module {
86995 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86996 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86997 char *, char *, int , int, int, int);
86998-};
86999+} __do_const;
87000
87001 void v9fs_register_trans(struct p9_trans_module *m);
87002 void v9fs_unregister_trans(struct p9_trans_module *m);
87003diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87004index a175ba4..196eb82 100644
87005--- a/include/net/af_unix.h
87006+++ b/include/net/af_unix.h
87007@@ -36,7 +36,7 @@ struct unix_skb_parms {
87008 u32 secid; /* Security ID */
87009 #endif
87010 u32 consumed;
87011-};
87012+} __randomize_layout;
87013
87014 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87015 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87016diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87017index 8df15ad..837fbedd 100644
87018--- a/include/net/bluetooth/l2cap.h
87019+++ b/include/net/bluetooth/l2cap.h
87020@@ -608,7 +608,7 @@ struct l2cap_ops {
87021 unsigned char *kdata,
87022 struct iovec *iov,
87023 int len);
87024-};
87025+} __do_const;
87026
87027 struct l2cap_conn {
87028 struct hci_conn *hcon;
87029diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87030index f2ae33d..c457cf0 100644
87031--- a/include/net/caif/cfctrl.h
87032+++ b/include/net/caif/cfctrl.h
87033@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87034 void (*radioset_rsp)(void);
87035 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87036 struct cflayer *client_layer);
87037-};
87038+} __no_const;
87039
87040 /* Link Setup Parameters for CAIF-Links. */
87041 struct cfctrl_link_param {
87042@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87043 struct cfctrl {
87044 struct cfsrvl serv;
87045 struct cfctrl_rsp res;
87046- atomic_t req_seq_no;
87047- atomic_t rsp_seq_no;
87048+ atomic_unchecked_t req_seq_no;
87049+ atomic_unchecked_t rsp_seq_no;
87050 struct list_head list;
87051 /* Protects from simultaneous access to first_req list */
87052 spinlock_t info_list_lock;
87053diff --git a/include/net/flow.h b/include/net/flow.h
87054index 8109a15..504466d 100644
87055--- a/include/net/flow.h
87056+++ b/include/net/flow.h
87057@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87058
87059 void flow_cache_flush(struct net *net);
87060 void flow_cache_flush_deferred(struct net *net);
87061-extern atomic_t flow_cache_genid;
87062+extern atomic_unchecked_t flow_cache_genid;
87063
87064 #endif
87065diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87066index af10c2c..a431cc5 100644
87067--- a/include/net/genetlink.h
87068+++ b/include/net/genetlink.h
87069@@ -120,7 +120,7 @@ struct genl_ops {
87070 u8 cmd;
87071 u8 internal_flags;
87072 u8 flags;
87073-};
87074+} __do_const;
87075
87076 int __genl_register_family(struct genl_family *family);
87077
87078diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87079index 734d9b5..48a9a4b 100644
87080--- a/include/net/gro_cells.h
87081+++ b/include/net/gro_cells.h
87082@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87083 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
87084
87085 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87086- atomic_long_inc(&dev->rx_dropped);
87087+ atomic_long_inc_unchecked(&dev->rx_dropped);
87088 kfree_skb(skb);
87089 return;
87090 }
87091diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87092index 5fbe656..9ed3d8b 100644
87093--- a/include/net/inet_connection_sock.h
87094+++ b/include/net/inet_connection_sock.h
87095@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
87096 int (*bind_conflict)(const struct sock *sk,
87097 const struct inet_bind_bucket *tb, bool relax);
87098 void (*mtu_reduced)(struct sock *sk);
87099-};
87100+} __do_const;
87101
87102 /** inet_connection_sock - INET connection oriented sock
87103 *
87104diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87105index 01d590e..f69c61d 100644
87106--- a/include/net/inetpeer.h
87107+++ b/include/net/inetpeer.h
87108@@ -47,7 +47,7 @@ struct inet_peer {
87109 */
87110 union {
87111 struct {
87112- atomic_t rid; /* Frag reception counter */
87113+ atomic_unchecked_t rid; /* Frag reception counter */
87114 };
87115 struct rcu_head rcu;
87116 struct inet_peer *gc_next;
87117diff --git a/include/net/ip.h b/include/net/ip.h
87118index db4a771..965a42a 100644
87119--- a/include/net/ip.h
87120+++ b/include/net/ip.h
87121@@ -316,7 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87122 }
87123 }
87124
87125-u32 ip_idents_reserve(u32 hash, int segs);
87126+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87127 void __ip_select_ident(struct iphdr *iph, int segs);
87128
87129 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87130diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87131index 9922093..a1755d6 100644
87132--- a/include/net/ip_fib.h
87133+++ b/include/net/ip_fib.h
87134@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87135
87136 #define FIB_RES_SADDR(net, res) \
87137 ((FIB_RES_NH(res).nh_saddr_genid == \
87138- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87139+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87140 FIB_RES_NH(res).nh_saddr : \
87141 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87142 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87143diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87144index 624a8a5..b1e2a24 100644
87145--- a/include/net/ip_vs.h
87146+++ b/include/net/ip_vs.h
87147@@ -558,7 +558,7 @@ struct ip_vs_conn {
87148 struct ip_vs_conn *control; /* Master control connection */
87149 atomic_t n_control; /* Number of controlled ones */
87150 struct ip_vs_dest *dest; /* real server */
87151- atomic_t in_pkts; /* incoming packet counter */
87152+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87153
87154 /* packet transmitter for different forwarding methods. If it
87155 mangles the packet, it must return NF_DROP or better NF_STOLEN,
87156@@ -705,7 +705,7 @@ struct ip_vs_dest {
87157 __be16 port; /* port number of the server */
87158 union nf_inet_addr addr; /* IP address of the server */
87159 volatile unsigned int flags; /* dest status flags */
87160- atomic_t conn_flags; /* flags to copy to conn */
87161+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87162 atomic_t weight; /* server weight */
87163
87164 atomic_t refcnt; /* reference counter */
87165@@ -960,11 +960,11 @@ struct netns_ipvs {
87166 /* ip_vs_lblc */
87167 int sysctl_lblc_expiration;
87168 struct ctl_table_header *lblc_ctl_header;
87169- struct ctl_table *lblc_ctl_table;
87170+ ctl_table_no_const *lblc_ctl_table;
87171 /* ip_vs_lblcr */
87172 int sysctl_lblcr_expiration;
87173 struct ctl_table_header *lblcr_ctl_header;
87174- struct ctl_table *lblcr_ctl_table;
87175+ ctl_table_no_const *lblcr_ctl_table;
87176 /* ip_vs_est */
87177 struct list_head est_list; /* estimator list */
87178 spinlock_t est_lock;
87179diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87180index 8d4f588..2e37ad2 100644
87181--- a/include/net/irda/ircomm_tty.h
87182+++ b/include/net/irda/ircomm_tty.h
87183@@ -33,6 +33,7 @@
87184 #include <linux/termios.h>
87185 #include <linux/timer.h>
87186 #include <linux/tty.h> /* struct tty_struct */
87187+#include <asm/local.h>
87188
87189 #include <net/irda/irias_object.h>
87190 #include <net/irda/ircomm_core.h>
87191diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87192index 714cc9a..ea05f3e 100644
87193--- a/include/net/iucv/af_iucv.h
87194+++ b/include/net/iucv/af_iucv.h
87195@@ -149,7 +149,7 @@ struct iucv_skb_cb {
87196 struct iucv_sock_list {
87197 struct hlist_head head;
87198 rwlock_t lock;
87199- atomic_t autobind_name;
87200+ atomic_unchecked_t autobind_name;
87201 };
87202
87203 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
87204diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
87205index f3be818..bf46196 100644
87206--- a/include/net/llc_c_ac.h
87207+++ b/include/net/llc_c_ac.h
87208@@ -87,7 +87,7 @@
87209 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
87210 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
87211
87212-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87213+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87214
87215 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
87216 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
87217diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
87218index 3948cf1..83b28c4 100644
87219--- a/include/net/llc_c_ev.h
87220+++ b/include/net/llc_c_ev.h
87221@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
87222 return (struct llc_conn_state_ev *)skb->cb;
87223 }
87224
87225-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87226-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87227+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87228+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87229
87230 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
87231 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
87232diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
87233index 0e79cfb..f46db31 100644
87234--- a/include/net/llc_c_st.h
87235+++ b/include/net/llc_c_st.h
87236@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
87237 u8 next_state;
87238 llc_conn_ev_qfyr_t *ev_qualifiers;
87239 llc_conn_action_t *ev_actions;
87240-};
87241+} __do_const;
87242
87243 struct llc_conn_state {
87244 u8 current_state;
87245diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
87246index a61b98c..aade1eb 100644
87247--- a/include/net/llc_s_ac.h
87248+++ b/include/net/llc_s_ac.h
87249@@ -23,7 +23,7 @@
87250 #define SAP_ACT_TEST_IND 9
87251
87252 /* All action functions must look like this */
87253-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87254+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87255
87256 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
87257 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
87258diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
87259index 567c681..cd73ac02 100644
87260--- a/include/net/llc_s_st.h
87261+++ b/include/net/llc_s_st.h
87262@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
87263 llc_sap_ev_t ev;
87264 u8 next_state;
87265 llc_sap_action_t *ev_actions;
87266-};
87267+} __do_const;
87268
87269 struct llc_sap_state {
87270 u8 curr_state;
87271diff --git a/include/net/mac80211.h b/include/net/mac80211.h
87272index dae2e24..89336e6 100644
87273--- a/include/net/mac80211.h
87274+++ b/include/net/mac80211.h
87275@@ -4650,7 +4650,7 @@ struct rate_control_ops {
87276 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
87277
87278 u32 (*get_expected_throughput)(void *priv_sta);
87279-};
87280+} __do_const;
87281
87282 static inline int rate_supported(struct ieee80211_sta *sta,
87283 enum ieee80211_band band,
87284diff --git a/include/net/neighbour.h b/include/net/neighbour.h
87285index 47f4254..fd095bc 100644
87286--- a/include/net/neighbour.h
87287+++ b/include/net/neighbour.h
87288@@ -163,7 +163,7 @@ struct neigh_ops {
87289 void (*error_report)(struct neighbour *, struct sk_buff *);
87290 int (*output)(struct neighbour *, struct sk_buff *);
87291 int (*connected_output)(struct neighbour *, struct sk_buff *);
87292-};
87293+} __do_const;
87294
87295 struct pneigh_entry {
87296 struct pneigh_entry *next;
87297@@ -217,7 +217,7 @@ struct neigh_table {
87298 struct neigh_statistics __percpu *stats;
87299 struct neigh_hash_table __rcu *nht;
87300 struct pneigh_entry **phash_buckets;
87301-};
87302+} __randomize_layout;
87303
87304 static inline int neigh_parms_family(struct neigh_parms *p)
87305 {
87306diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
87307index e0d6466..e2f3003 100644
87308--- a/include/net/net_namespace.h
87309+++ b/include/net/net_namespace.h
87310@@ -129,8 +129,8 @@ struct net {
87311 struct netns_ipvs *ipvs;
87312 #endif
87313 struct sock *diag_nlsk;
87314- atomic_t fnhe_genid;
87315-};
87316+ atomic_unchecked_t fnhe_genid;
87317+} __randomize_layout;
87318
87319 #include <linux/seq_file_net.h>
87320
87321@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
87322 #define __net_init __init
87323 #define __net_exit __exit_refok
87324 #define __net_initdata __initdata
87325+#ifdef CONSTIFY_PLUGIN
87326 #define __net_initconst __initconst
87327+#else
87328+#define __net_initconst __initdata
87329+#endif
87330 #endif
87331
87332 struct pernet_operations {
87333@@ -296,7 +300,7 @@ struct pernet_operations {
87334 void (*exit_batch)(struct list_head *net_exit_list);
87335 int *id;
87336 size_t size;
87337-};
87338+} __do_const;
87339
87340 /*
87341 * Use these carefully. If you implement a network device and it
87342@@ -344,12 +348,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
87343
87344 static inline int rt_genid_ipv4(struct net *net)
87345 {
87346- return atomic_read(&net->ipv4.rt_genid);
87347+ return atomic_read_unchecked(&net->ipv4.rt_genid);
87348 }
87349
87350 static inline void rt_genid_bump_ipv4(struct net *net)
87351 {
87352- atomic_inc(&net->ipv4.rt_genid);
87353+ atomic_inc_unchecked(&net->ipv4.rt_genid);
87354 }
87355
87356 extern void (*__fib6_flush_trees)(struct net *net);
87357@@ -376,12 +380,12 @@ static inline void rt_genid_bump_all(struct net *net)
87358
87359 static inline int fnhe_genid(struct net *net)
87360 {
87361- return atomic_read(&net->fnhe_genid);
87362+ return atomic_read_unchecked(&net->fnhe_genid);
87363 }
87364
87365 static inline void fnhe_genid_bump(struct net *net)
87366 {
87367- atomic_inc(&net->fnhe_genid);
87368+ atomic_inc_unchecked(&net->fnhe_genid);
87369 }
87370
87371 #endif /* __NET_NET_NAMESPACE_H */
87372diff --git a/include/net/netdma.h b/include/net/netdma.h
87373index 8ba8ce2..99b7fff 100644
87374--- a/include/net/netdma.h
87375+++ b/include/net/netdma.h
87376@@ -24,7 +24,7 @@
87377 #include <linux/dmaengine.h>
87378 #include <linux/skbuff.h>
87379
87380-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87381+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87382 struct sk_buff *skb, int offset, struct iovec *to,
87383 size_t len, struct dma_pinned_list *pinned_list);
87384
87385diff --git a/include/net/netlink.h b/include/net/netlink.h
87386index 6c10762..3e5de0c 100644
87387--- a/include/net/netlink.h
87388+++ b/include/net/netlink.h
87389@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
87390 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
87391 {
87392 if (mark)
87393- skb_trim(skb, (unsigned char *) mark - skb->data);
87394+ skb_trim(skb, (const unsigned char *) mark - skb->data);
87395 }
87396
87397 /**
87398diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
87399index 29d6a94..235d3d8 100644
87400--- a/include/net/netns/conntrack.h
87401+++ b/include/net/netns/conntrack.h
87402@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
87403 struct nf_proto_net {
87404 #ifdef CONFIG_SYSCTL
87405 struct ctl_table_header *ctl_table_header;
87406- struct ctl_table *ctl_table;
87407+ ctl_table_no_const *ctl_table;
87408 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
87409 struct ctl_table_header *ctl_compat_header;
87410- struct ctl_table *ctl_compat_table;
87411+ ctl_table_no_const *ctl_compat_table;
87412 #endif
87413 #endif
87414 unsigned int users;
87415@@ -60,7 +60,7 @@ struct nf_ip_net {
87416 struct nf_icmp_net icmpv6;
87417 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
87418 struct ctl_table_header *ctl_table_header;
87419- struct ctl_table *ctl_table;
87420+ ctl_table_no_const *ctl_table;
87421 #endif
87422 };
87423
87424diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
87425index aec5e12..807233f 100644
87426--- a/include/net/netns/ipv4.h
87427+++ b/include/net/netns/ipv4.h
87428@@ -82,7 +82,7 @@ struct netns_ipv4 {
87429
87430 struct ping_group_range ping_group_range;
87431
87432- atomic_t dev_addr_genid;
87433+ atomic_unchecked_t dev_addr_genid;
87434
87435 #ifdef CONFIG_SYSCTL
87436 unsigned long *sysctl_local_reserved_ports;
87437@@ -96,6 +96,6 @@ struct netns_ipv4 {
87438 struct fib_rules_ops *mr_rules_ops;
87439 #endif
87440 #endif
87441- atomic_t rt_genid;
87442+ atomic_unchecked_t rt_genid;
87443 };
87444 #endif
87445diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
87446index eade27a..42894dd 100644
87447--- a/include/net/netns/ipv6.h
87448+++ b/include/net/netns/ipv6.h
87449@@ -75,8 +75,8 @@ struct netns_ipv6 {
87450 struct fib_rules_ops *mr6_rules_ops;
87451 #endif
87452 #endif
87453- atomic_t dev_addr_genid;
87454- atomic_t rt_genid;
87455+ atomic_unchecked_t dev_addr_genid;
87456+ atomic_unchecked_t rt_genid;
87457 };
87458
87459 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
87460diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
87461index 3492434..209f58c 100644
87462--- a/include/net/netns/xfrm.h
87463+++ b/include/net/netns/xfrm.h
87464@@ -64,7 +64,7 @@ struct netns_xfrm {
87465
87466 /* flow cache part */
87467 struct flow_cache flow_cache_global;
87468- atomic_t flow_cache_genid;
87469+ atomic_unchecked_t flow_cache_genid;
87470 struct list_head flow_cache_gc_list;
87471 spinlock_t flow_cache_gc_lock;
87472 struct work_struct flow_cache_gc_work;
87473diff --git a/include/net/ping.h b/include/net/ping.h
87474index 026479b..d9b2829 100644
87475--- a/include/net/ping.h
87476+++ b/include/net/ping.h
87477@@ -54,7 +54,7 @@ struct ping_iter_state {
87478
87479 extern struct proto ping_prot;
87480 #if IS_ENABLED(CONFIG_IPV6)
87481-extern struct pingv6_ops pingv6_ops;
87482+extern struct pingv6_ops *pingv6_ops;
87483 #endif
87484
87485 struct pingfakehdr {
87486diff --git a/include/net/protocol.h b/include/net/protocol.h
87487index d6fcc1f..ca277058 100644
87488--- a/include/net/protocol.h
87489+++ b/include/net/protocol.h
87490@@ -49,7 +49,7 @@ struct net_protocol {
87491 * socket lookup?
87492 */
87493 icmp_strict_tag_validation:1;
87494-};
87495+} __do_const;
87496
87497 #if IS_ENABLED(CONFIG_IPV6)
87498 struct inet6_protocol {
87499@@ -62,7 +62,7 @@ struct inet6_protocol {
87500 u8 type, u8 code, int offset,
87501 __be32 info);
87502 unsigned int flags; /* INET6_PROTO_xxx */
87503-};
87504+} __do_const;
87505
87506 #define INET6_PROTO_NOPOLICY 0x1
87507 #define INET6_PROTO_FINAL 0x2
87508diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
87509index e21b9f9..0191ef0 100644
87510--- a/include/net/rtnetlink.h
87511+++ b/include/net/rtnetlink.h
87512@@ -93,7 +93,7 @@ struct rtnl_link_ops {
87513 int (*fill_slave_info)(struct sk_buff *skb,
87514 const struct net_device *dev,
87515 const struct net_device *slave_dev);
87516-};
87517+} __do_const;
87518
87519 int __rtnl_link_register(struct rtnl_link_ops *ops);
87520 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
87521diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
87522index 4a5b9a3..ca27d73 100644
87523--- a/include/net/sctp/checksum.h
87524+++ b/include/net/sctp/checksum.h
87525@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
87526 unsigned int offset)
87527 {
87528 struct sctphdr *sh = sctp_hdr(skb);
87529- __le32 ret, old = sh->checksum;
87530- const struct skb_checksum_ops ops = {
87531+ __le32 ret, old = sh->checksum;
87532+ static const struct skb_checksum_ops ops = {
87533 .update = sctp_csum_update,
87534 .combine = sctp_csum_combine,
87535 };
87536diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
87537index 7f4eeb3..37e8fe1 100644
87538--- a/include/net/sctp/sm.h
87539+++ b/include/net/sctp/sm.h
87540@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
87541 typedef struct {
87542 sctp_state_fn_t *fn;
87543 const char *name;
87544-} sctp_sm_table_entry_t;
87545+} __do_const sctp_sm_table_entry_t;
87546
87547 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
87548 * currently in use.
87549@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
87550 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
87551
87552 /* Extern declarations for major data structures. */
87553-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87554+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87555
87556
87557 /* Get the size of a DATA chunk payload. */
87558diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
87559index 4ff3f67..89ae38e 100644
87560--- a/include/net/sctp/structs.h
87561+++ b/include/net/sctp/structs.h
87562@@ -509,7 +509,7 @@ struct sctp_pf {
87563 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
87564 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
87565 struct sctp_af *af;
87566-};
87567+} __do_const;
87568
87569
87570 /* Structure to track chunk fragments that have been acked, but peer
87571diff --git a/include/net/sock.h b/include/net/sock.h
87572index b9a5bd0..dcd5f3c 100644
87573--- a/include/net/sock.h
87574+++ b/include/net/sock.h
87575@@ -356,7 +356,7 @@ struct sock {
87576 unsigned int sk_napi_id;
87577 unsigned int sk_ll_usec;
87578 #endif
87579- atomic_t sk_drops;
87580+ atomic_unchecked_t sk_drops;
87581 int sk_rcvbuf;
87582
87583 struct sk_filter __rcu *sk_filter;
87584@@ -1053,7 +1053,7 @@ struct proto {
87585 void (*destroy_cgroup)(struct mem_cgroup *memcg);
87586 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
87587 #endif
87588-};
87589+} __randomize_layout;
87590
87591 /*
87592 * Bits in struct cg_proto.flags
87593@@ -1240,7 +1240,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
87594 return ret >> PAGE_SHIFT;
87595 }
87596
87597-static inline long
87598+static inline long __intentional_overflow(-1)
87599 sk_memory_allocated(const struct sock *sk)
87600 {
87601 struct proto *prot = sk->sk_prot;
87602@@ -1385,7 +1385,7 @@ struct sock_iocb {
87603 struct scm_cookie *scm;
87604 struct msghdr *msg, async_msg;
87605 struct kiocb *kiocb;
87606-};
87607+} __randomize_layout;
87608
87609 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
87610 {
87611@@ -1820,7 +1820,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
87612 }
87613
87614 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
87615- char __user *from, char *to,
87616+ char __user *from, unsigned char *to,
87617 int copy, int offset)
87618 {
87619 if (skb->ip_summed == CHECKSUM_NONE) {
87620@@ -2091,7 +2091,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
87621 }
87622 }
87623
87624-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87625+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87626
87627 /**
87628 * sk_page_frag - return an appropriate page_frag
87629diff --git a/include/net/tcp.h b/include/net/tcp.h
87630index 590e01a..76498f3 100644
87631--- a/include/net/tcp.h
87632+++ b/include/net/tcp.h
87633@@ -523,7 +523,7 @@ void tcp_retransmit_timer(struct sock *sk);
87634 void tcp_xmit_retransmit_queue(struct sock *);
87635 void tcp_simple_retransmit(struct sock *);
87636 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
87637-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87638+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87639
87640 void tcp_send_probe0(struct sock *);
87641 void tcp_send_partial(struct sock *);
87642@@ -696,8 +696,8 @@ struct tcp_skb_cb {
87643 struct inet6_skb_parm h6;
87644 #endif
87645 } header; /* For incoming frames */
87646- __u32 seq; /* Starting sequence number */
87647- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
87648+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
87649+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
87650 __u32 when; /* used to compute rtt's */
87651 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
87652
87653@@ -713,7 +713,7 @@ struct tcp_skb_cb {
87654
87655 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
87656 /* 1 byte hole */
87657- __u32 ack_seq; /* Sequence number ACK'd */
87658+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
87659 };
87660
87661 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
87662diff --git a/include/net/xfrm.h b/include/net/xfrm.h
87663index 721e9c3b..3c81bbf 100644
87664--- a/include/net/xfrm.h
87665+++ b/include/net/xfrm.h
87666@@ -285,7 +285,6 @@ struct xfrm_dst;
87667 struct xfrm_policy_afinfo {
87668 unsigned short family;
87669 struct dst_ops *dst_ops;
87670- void (*garbage_collect)(struct net *net);
87671 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
87672 const xfrm_address_t *saddr,
87673 const xfrm_address_t *daddr);
87674@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
87675 struct net_device *dev,
87676 const struct flowi *fl);
87677 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
87678-};
87679+} __do_const;
87680
87681 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
87682 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
87683@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
87684 int (*transport_finish)(struct sk_buff *skb,
87685 int async);
87686 void (*local_error)(struct sk_buff *skb, u32 mtu);
87687-};
87688+} __do_const;
87689
87690 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
87691 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
87692@@ -437,7 +436,7 @@ struct xfrm_mode {
87693 struct module *owner;
87694 unsigned int encap;
87695 int flags;
87696-};
87697+} __do_const;
87698
87699 /* Flags for xfrm_mode. */
87700 enum {
87701@@ -534,7 +533,7 @@ struct xfrm_policy {
87702 struct timer_list timer;
87703
87704 struct flow_cache_object flo;
87705- atomic_t genid;
87706+ atomic_unchecked_t genid;
87707 u32 priority;
87708 u32 index;
87709 struct xfrm_mark mark;
87710@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
87711 }
87712
87713 void xfrm_garbage_collect(struct net *net);
87714+void xfrm_garbage_collect_deferred(struct net *net);
87715
87716 #else
87717
87718@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
87719 static inline void xfrm_garbage_collect(struct net *net)
87720 {
87721 }
87722+static inline void xfrm_garbage_collect_deferred(struct net *net)
87723+{
87724+}
87725 #endif
87726
87727 static __inline__
87728diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
87729index 1017e0b..227aa4d 100644
87730--- a/include/rdma/iw_cm.h
87731+++ b/include/rdma/iw_cm.h
87732@@ -122,7 +122,7 @@ struct iw_cm_verbs {
87733 int backlog);
87734
87735 int (*destroy_listen)(struct iw_cm_id *cm_id);
87736-};
87737+} __no_const;
87738
87739 /**
87740 * iw_create_cm_id - Create an IW CM identifier.
87741diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
87742index 52beadf..598734c 100644
87743--- a/include/scsi/libfc.h
87744+++ b/include/scsi/libfc.h
87745@@ -771,6 +771,7 @@ struct libfc_function_template {
87746 */
87747 void (*disc_stop_final) (struct fc_lport *);
87748 };
87749+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
87750
87751 /**
87752 * struct fc_disc - Discovery context
87753@@ -875,7 +876,7 @@ struct fc_lport {
87754 struct fc_vport *vport;
87755
87756 /* Operational Information */
87757- struct libfc_function_template tt;
87758+ libfc_function_template_no_const tt;
87759 u8 link_up;
87760 u8 qfull;
87761 enum fc_lport_state state;
87762diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87763index 1a0d184..4fb841f 100644
87764--- a/include/scsi/scsi_device.h
87765+++ b/include/scsi/scsi_device.h
87766@@ -185,9 +185,9 @@ struct scsi_device {
87767 unsigned int max_device_blocked; /* what device_blocked counts down from */
87768 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87769
87770- atomic_t iorequest_cnt;
87771- atomic_t iodone_cnt;
87772- atomic_t ioerr_cnt;
87773+ atomic_unchecked_t iorequest_cnt;
87774+ atomic_unchecked_t iodone_cnt;
87775+ atomic_unchecked_t ioerr_cnt;
87776
87777 struct device sdev_gendev,
87778 sdev_dev;
87779diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87780index 007a0bc..7188db8 100644
87781--- a/include/scsi/scsi_transport_fc.h
87782+++ b/include/scsi/scsi_transport_fc.h
87783@@ -756,7 +756,8 @@ struct fc_function_template {
87784 unsigned long show_host_system_hostname:1;
87785
87786 unsigned long disable_target_scan:1;
87787-};
87788+} __do_const;
87789+typedef struct fc_function_template __no_const fc_function_template_no_const;
87790
87791
87792 /**
87793diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87794index ae6c3b8..fd748ac 100644
87795--- a/include/sound/compress_driver.h
87796+++ b/include/sound/compress_driver.h
87797@@ -128,7 +128,7 @@ struct snd_compr_ops {
87798 struct snd_compr_caps *caps);
87799 int (*get_codec_caps) (struct snd_compr_stream *stream,
87800 struct snd_compr_codec_caps *codec);
87801-};
87802+} __no_const;
87803
87804 /**
87805 * struct snd_compr: Compressed device
87806diff --git a/include/sound/soc.h b/include/sound/soc.h
87807index c83a334..27c8038 100644
87808--- a/include/sound/soc.h
87809+++ b/include/sound/soc.h
87810@@ -817,7 +817,7 @@ struct snd_soc_codec_driver {
87811 /* probe ordering - for components with runtime dependencies */
87812 int probe_order;
87813 int remove_order;
87814-};
87815+} __do_const;
87816
87817 /* SoC platform interface */
87818 struct snd_soc_platform_driver {
87819@@ -861,7 +861,7 @@ struct snd_soc_platform_driver {
87820 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
87821 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
87822 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87823-};
87824+} __do_const;
87825
87826 struct snd_soc_dai_link_component {
87827 const char *name;
87828diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87829index 9ec9864..e2ee1ee 100644
87830--- a/include/target/target_core_base.h
87831+++ b/include/target/target_core_base.h
87832@@ -761,7 +761,7 @@ struct se_device {
87833 atomic_long_t write_bytes;
87834 /* Active commands on this virtual SE device */
87835 atomic_t simple_cmds;
87836- atomic_t dev_ordered_id;
87837+ atomic_unchecked_t dev_ordered_id;
87838 atomic_t dev_ordered_sync;
87839 atomic_t dev_qf_count;
87840 int export_count;
87841diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87842new file mode 100644
87843index 0000000..fb634b7
87844--- /dev/null
87845+++ b/include/trace/events/fs.h
87846@@ -0,0 +1,53 @@
87847+#undef TRACE_SYSTEM
87848+#define TRACE_SYSTEM fs
87849+
87850+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87851+#define _TRACE_FS_H
87852+
87853+#include <linux/fs.h>
87854+#include <linux/tracepoint.h>
87855+
87856+TRACE_EVENT(do_sys_open,
87857+
87858+ TP_PROTO(const char *filename, int flags, int mode),
87859+
87860+ TP_ARGS(filename, flags, mode),
87861+
87862+ TP_STRUCT__entry(
87863+ __string( filename, filename )
87864+ __field( int, flags )
87865+ __field( int, mode )
87866+ ),
87867+
87868+ TP_fast_assign(
87869+ __assign_str(filename, filename);
87870+ __entry->flags = flags;
87871+ __entry->mode = mode;
87872+ ),
87873+
87874+ TP_printk("\"%s\" %x %o",
87875+ __get_str(filename), __entry->flags, __entry->mode)
87876+);
87877+
87878+TRACE_EVENT(open_exec,
87879+
87880+ TP_PROTO(const char *filename),
87881+
87882+ TP_ARGS(filename),
87883+
87884+ TP_STRUCT__entry(
87885+ __string( filename, filename )
87886+ ),
87887+
87888+ TP_fast_assign(
87889+ __assign_str(filename, filename);
87890+ ),
87891+
87892+ TP_printk("\"%s\"",
87893+ __get_str(filename))
87894+);
87895+
87896+#endif /* _TRACE_FS_H */
87897+
87898+/* This part must be outside protection */
87899+#include <trace/define_trace.h>
87900diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87901index 3608beb..df39d8a 100644
87902--- a/include/trace/events/irq.h
87903+++ b/include/trace/events/irq.h
87904@@ -36,7 +36,7 @@ struct softirq_action;
87905 */
87906 TRACE_EVENT(irq_handler_entry,
87907
87908- TP_PROTO(int irq, struct irqaction *action),
87909+ TP_PROTO(int irq, const struct irqaction *action),
87910
87911 TP_ARGS(irq, action),
87912
87913@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87914 */
87915 TRACE_EVENT(irq_handler_exit,
87916
87917- TP_PROTO(int irq, struct irqaction *action, int ret),
87918+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87919
87920 TP_ARGS(irq, action, ret),
87921
87922diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87923index 7caf44c..23c6f27 100644
87924--- a/include/uapi/linux/a.out.h
87925+++ b/include/uapi/linux/a.out.h
87926@@ -39,6 +39,14 @@ enum machine_type {
87927 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87928 };
87929
87930+/* Constants for the N_FLAGS field */
87931+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87932+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87933+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87934+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87935+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87936+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87937+
87938 #if !defined (N_MAGIC)
87939 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87940 #endif
87941diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87942index 22b6ad3..aeba37e 100644
87943--- a/include/uapi/linux/bcache.h
87944+++ b/include/uapi/linux/bcache.h
87945@@ -5,6 +5,7 @@
87946 * Bcache on disk data structures
87947 */
87948
87949+#include <linux/compiler.h>
87950 #include <asm/types.h>
87951
87952 #define BITMASK(name, type, field, offset, size) \
87953@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87954 /* Btree keys - all units are in sectors */
87955
87956 struct bkey {
87957- __u64 high;
87958- __u64 low;
87959+ __u64 high __intentional_overflow(-1);
87960+ __u64 low __intentional_overflow(-1);
87961 __u64 ptr[];
87962 };
87963
87964diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87965index d876736..ccce5c0 100644
87966--- a/include/uapi/linux/byteorder/little_endian.h
87967+++ b/include/uapi/linux/byteorder/little_endian.h
87968@@ -42,51 +42,51 @@
87969
87970 static inline __le64 __cpu_to_le64p(const __u64 *p)
87971 {
87972- return (__force __le64)*p;
87973+ return (__force const __le64)*p;
87974 }
87975-static inline __u64 __le64_to_cpup(const __le64 *p)
87976+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87977 {
87978- return (__force __u64)*p;
87979+ return (__force const __u64)*p;
87980 }
87981 static inline __le32 __cpu_to_le32p(const __u32 *p)
87982 {
87983- return (__force __le32)*p;
87984+ return (__force const __le32)*p;
87985 }
87986 static inline __u32 __le32_to_cpup(const __le32 *p)
87987 {
87988- return (__force __u32)*p;
87989+ return (__force const __u32)*p;
87990 }
87991 static inline __le16 __cpu_to_le16p(const __u16 *p)
87992 {
87993- return (__force __le16)*p;
87994+ return (__force const __le16)*p;
87995 }
87996 static inline __u16 __le16_to_cpup(const __le16 *p)
87997 {
87998- return (__force __u16)*p;
87999+ return (__force const __u16)*p;
88000 }
88001 static inline __be64 __cpu_to_be64p(const __u64 *p)
88002 {
88003- return (__force __be64)__swab64p(p);
88004+ return (__force const __be64)__swab64p(p);
88005 }
88006 static inline __u64 __be64_to_cpup(const __be64 *p)
88007 {
88008- return __swab64p((__u64 *)p);
88009+ return __swab64p((const __u64 *)p);
88010 }
88011 static inline __be32 __cpu_to_be32p(const __u32 *p)
88012 {
88013- return (__force __be32)__swab32p(p);
88014+ return (__force const __be32)__swab32p(p);
88015 }
88016-static inline __u32 __be32_to_cpup(const __be32 *p)
88017+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88018 {
88019- return __swab32p((__u32 *)p);
88020+ return __swab32p((const __u32 *)p);
88021 }
88022 static inline __be16 __cpu_to_be16p(const __u16 *p)
88023 {
88024- return (__force __be16)__swab16p(p);
88025+ return (__force const __be16)__swab16p(p);
88026 }
88027 static inline __u16 __be16_to_cpup(const __be16 *p)
88028 {
88029- return __swab16p((__u16 *)p);
88030+ return __swab16p((const __u16 *)p);
88031 }
88032 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88033 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88034diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88035index ef6103b..d4e65dd 100644
88036--- a/include/uapi/linux/elf.h
88037+++ b/include/uapi/linux/elf.h
88038@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88039 #define PT_GNU_EH_FRAME 0x6474e550
88040
88041 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88042+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88043+
88044+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88045+
88046+/* Constants for the e_flags field */
88047+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88048+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88049+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88050+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88051+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88052+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88053
88054 /*
88055 * Extended Numbering
88056@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88057 #define DT_DEBUG 21
88058 #define DT_TEXTREL 22
88059 #define DT_JMPREL 23
88060+#define DT_FLAGS 30
88061+ #define DF_TEXTREL 0x00000004
88062 #define DT_ENCODING 32
88063 #define OLD_DT_LOOS 0x60000000
88064 #define DT_LOOS 0x6000000d
88065@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88066 #define PF_W 0x2
88067 #define PF_X 0x1
88068
88069+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88070+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88071+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88072+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88073+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88074+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88075+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88076+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88077+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88078+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88079+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88080+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88081+
88082 typedef struct elf32_phdr{
88083 Elf32_Word p_type;
88084 Elf32_Off p_offset;
88085@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88086 #define EI_OSABI 7
88087 #define EI_PAD 8
88088
88089+#define EI_PAX 14
88090+
88091 #define ELFMAG0 0x7f /* EI_MAG */
88092 #define ELFMAG1 'E'
88093 #define ELFMAG2 'L'
88094diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88095index aa169c4..6a2771d 100644
88096--- a/include/uapi/linux/personality.h
88097+++ b/include/uapi/linux/personality.h
88098@@ -30,6 +30,7 @@ enum {
88099 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88100 ADDR_NO_RANDOMIZE | \
88101 ADDR_COMPAT_LAYOUT | \
88102+ ADDR_LIMIT_3GB | \
88103 MMAP_PAGE_ZERO)
88104
88105 /*
88106diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88107index 7530e74..e714828 100644
88108--- a/include/uapi/linux/screen_info.h
88109+++ b/include/uapi/linux/screen_info.h
88110@@ -43,7 +43,8 @@ struct screen_info {
88111 __u16 pages; /* 0x32 */
88112 __u16 vesa_attributes; /* 0x34 */
88113 __u32 capabilities; /* 0x36 */
88114- __u8 _reserved[6]; /* 0x3a */
88115+ __u16 vesapm_size; /* 0x3a */
88116+ __u8 _reserved[4]; /* 0x3c */
88117 } __attribute__((packed));
88118
88119 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88120diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88121index 0e011eb..82681b1 100644
88122--- a/include/uapi/linux/swab.h
88123+++ b/include/uapi/linux/swab.h
88124@@ -43,7 +43,7 @@
88125 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88126 */
88127
88128-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88129+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88130 {
88131 #ifdef __HAVE_BUILTIN_BSWAP16__
88132 return __builtin_bswap16(val);
88133@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88134 #endif
88135 }
88136
88137-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88138+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88139 {
88140 #ifdef __HAVE_BUILTIN_BSWAP32__
88141 return __builtin_bswap32(val);
88142@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88143 #endif
88144 }
88145
88146-static inline __attribute_const__ __u64 __fswab64(__u64 val)
88147+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
88148 {
88149 #ifdef __HAVE_BUILTIN_BSWAP64__
88150 return __builtin_bswap64(val);
88151diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
88152index 43aaba1..1c30b48 100644
88153--- a/include/uapi/linux/sysctl.h
88154+++ b/include/uapi/linux/sysctl.h
88155@@ -155,8 +155,6 @@ enum
88156 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88157 };
88158
88159-
88160-
88161 /* CTL_VM names: */
88162 enum
88163 {
88164diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
88165index 778a329..1416ffb 100644
88166--- a/include/uapi/linux/videodev2.h
88167+++ b/include/uapi/linux/videodev2.h
88168@@ -1285,7 +1285,7 @@ struct v4l2_ext_control {
88169 union {
88170 __s32 value;
88171 __s64 value64;
88172- char *string;
88173+ char __user *string;
88174 __u8 *p_u8;
88175 __u16 *p_u16;
88176 __u32 *p_u32;
88177diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
88178index 1590c49..5eab462 100644
88179--- a/include/uapi/linux/xattr.h
88180+++ b/include/uapi/linux/xattr.h
88181@@ -73,5 +73,9 @@
88182 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
88183 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
88184
88185+/* User namespace */
88186+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88187+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88188+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88189
88190 #endif /* _UAPI_LINUX_XATTR_H */
88191diff --git a/include/video/udlfb.h b/include/video/udlfb.h
88192index f9466fa..f4e2b81 100644
88193--- a/include/video/udlfb.h
88194+++ b/include/video/udlfb.h
88195@@ -53,10 +53,10 @@ struct dlfb_data {
88196 u32 pseudo_palette[256];
88197 int blank_mode; /*one of FB_BLANK_ */
88198 /* blit-only rendering path metrics, exposed through sysfs */
88199- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88200- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
88201- atomic_t bytes_sent; /* to usb, after compression including overhead */
88202- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
88203+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88204+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
88205+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
88206+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
88207 };
88208
88209 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
88210diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
88211index 30f5362..8ed8ac9 100644
88212--- a/include/video/uvesafb.h
88213+++ b/include/video/uvesafb.h
88214@@ -122,6 +122,7 @@ struct uvesafb_par {
88215 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
88216 u8 pmi_setpal; /* PMI for palette changes */
88217 u16 *pmi_base; /* protected mode interface location */
88218+ u8 *pmi_code; /* protected mode code location */
88219 void *pmi_start;
88220 void *pmi_pal;
88221 u8 *vbe_state_orig; /*
88222diff --git a/init/Kconfig b/init/Kconfig
88223index 80a6907..baf7d53 100644
88224--- a/init/Kconfig
88225+++ b/init/Kconfig
88226@@ -1150,6 +1150,7 @@ endif # CGROUPS
88227
88228 config CHECKPOINT_RESTORE
88229 bool "Checkpoint/restore support" if EXPERT
88230+ depends on !GRKERNSEC
88231 default n
88232 help
88233 Enables additional kernel features in a sake of checkpoint/restore.
88234@@ -1635,7 +1636,7 @@ config SLUB_DEBUG
88235
88236 config COMPAT_BRK
88237 bool "Disable heap randomization"
88238- default y
88239+ default n
88240 help
88241 Randomizing heap placement makes heap exploits harder, but it
88242 also breaks ancient binaries (including anything libc5 based).
88243@@ -1923,7 +1924,7 @@ config INIT_ALL_POSSIBLE
88244 config STOP_MACHINE
88245 bool
88246 default y
88247- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
88248+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
88249 help
88250 Need stop_machine() primitive.
88251
88252diff --git a/init/Makefile b/init/Makefile
88253index 7bc47ee..6da2dc7 100644
88254--- a/init/Makefile
88255+++ b/init/Makefile
88256@@ -2,6 +2,9 @@
88257 # Makefile for the linux kernel.
88258 #
88259
88260+ccflags-y := $(GCC_PLUGINS_CFLAGS)
88261+asflags-y := $(GCC_PLUGINS_AFLAGS)
88262+
88263 obj-y := main.o version.o mounts.o
88264 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
88265 obj-y += noinitramfs.o
88266diff --git a/init/do_mounts.c b/init/do_mounts.c
88267index 82f2288..ea1430a 100644
88268--- a/init/do_mounts.c
88269+++ b/init/do_mounts.c
88270@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
88271 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
88272 {
88273 struct super_block *s;
88274- int err = sys_mount(name, "/root", fs, flags, data);
88275+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
88276 if (err)
88277 return err;
88278
88279- sys_chdir("/root");
88280+ sys_chdir((const char __force_user *)"/root");
88281 s = current->fs->pwd.dentry->d_sb;
88282 ROOT_DEV = s->s_dev;
88283 printk(KERN_INFO
88284@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
88285 va_start(args, fmt);
88286 vsprintf(buf, fmt, args);
88287 va_end(args);
88288- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
88289+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
88290 if (fd >= 0) {
88291 sys_ioctl(fd, FDEJECT, 0);
88292 sys_close(fd);
88293 }
88294 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
88295- fd = sys_open("/dev/console", O_RDWR, 0);
88296+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
88297 if (fd >= 0) {
88298 sys_ioctl(fd, TCGETS, (long)&termios);
88299 termios.c_lflag &= ~ICANON;
88300 sys_ioctl(fd, TCSETSF, (long)&termios);
88301- sys_read(fd, &c, 1);
88302+ sys_read(fd, (char __user *)&c, 1);
88303 termios.c_lflag |= ICANON;
88304 sys_ioctl(fd, TCSETSF, (long)&termios);
88305 sys_close(fd);
88306@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
88307 mount_root();
88308 out:
88309 devtmpfs_mount("dev");
88310- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88311- sys_chroot(".");
88312+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88313+ sys_chroot((const char __force_user *)".");
88314 }
88315
88316 static bool is_tmpfs;
88317diff --git a/init/do_mounts.h b/init/do_mounts.h
88318index f5b978a..69dbfe8 100644
88319--- a/init/do_mounts.h
88320+++ b/init/do_mounts.h
88321@@ -15,15 +15,15 @@ extern int root_mountflags;
88322
88323 static inline int create_dev(char *name, dev_t dev)
88324 {
88325- sys_unlink(name);
88326- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
88327+ sys_unlink((char __force_user *)name);
88328+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
88329 }
88330
88331 #if BITS_PER_LONG == 32
88332 static inline u32 bstat(char *name)
88333 {
88334 struct stat64 stat;
88335- if (sys_stat64(name, &stat) != 0)
88336+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
88337 return 0;
88338 if (!S_ISBLK(stat.st_mode))
88339 return 0;
88340@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
88341 static inline u32 bstat(char *name)
88342 {
88343 struct stat stat;
88344- if (sys_newstat(name, &stat) != 0)
88345+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
88346 return 0;
88347 if (!S_ISBLK(stat.st_mode))
88348 return 0;
88349diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
88350index 3e0878e..8a9d7a0 100644
88351--- a/init/do_mounts_initrd.c
88352+++ b/init/do_mounts_initrd.c
88353@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
88354 {
88355 sys_unshare(CLONE_FS | CLONE_FILES);
88356 /* stdin/stdout/stderr for /linuxrc */
88357- sys_open("/dev/console", O_RDWR, 0);
88358+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
88359 sys_dup(0);
88360 sys_dup(0);
88361 /* move initrd over / and chdir/chroot in initrd root */
88362- sys_chdir("/root");
88363- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88364- sys_chroot(".");
88365+ sys_chdir((const char __force_user *)"/root");
88366+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88367+ sys_chroot((const char __force_user *)".");
88368 sys_setsid();
88369 return 0;
88370 }
88371@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
88372 create_dev("/dev/root.old", Root_RAM0);
88373 /* mount initrd on rootfs' /root */
88374 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
88375- sys_mkdir("/old", 0700);
88376- sys_chdir("/old");
88377+ sys_mkdir((const char __force_user *)"/old", 0700);
88378+ sys_chdir((const char __force_user *)"/old");
88379
88380 /* try loading default modules from initrd */
88381 load_default_modules();
88382@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
88383 current->flags &= ~PF_FREEZER_SKIP;
88384
88385 /* move initrd to rootfs' /old */
88386- sys_mount("..", ".", NULL, MS_MOVE, NULL);
88387+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
88388 /* switch root and cwd back to / of rootfs */
88389- sys_chroot("..");
88390+ sys_chroot((const char __force_user *)"..");
88391
88392 if (new_decode_dev(real_root_dev) == Root_RAM0) {
88393- sys_chdir("/old");
88394+ sys_chdir((const char __force_user *)"/old");
88395 return;
88396 }
88397
88398- sys_chdir("/");
88399+ sys_chdir((const char __force_user *)"/");
88400 ROOT_DEV = new_decode_dev(real_root_dev);
88401 mount_root();
88402
88403 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
88404- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
88405+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
88406 if (!error)
88407 printk("okay\n");
88408 else {
88409- int fd = sys_open("/dev/root.old", O_RDWR, 0);
88410+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
88411 if (error == -ENOENT)
88412 printk("/initrd does not exist. Ignored.\n");
88413 else
88414 printk("failed\n");
88415 printk(KERN_NOTICE "Unmounting old root\n");
88416- sys_umount("/old", MNT_DETACH);
88417+ sys_umount((char __force_user *)"/old", MNT_DETACH);
88418 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
88419 if (fd < 0) {
88420 error = fd;
88421@@ -127,11 +127,11 @@ int __init initrd_load(void)
88422 * mounted in the normal path.
88423 */
88424 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
88425- sys_unlink("/initrd.image");
88426+ sys_unlink((const char __force_user *)"/initrd.image");
88427 handle_initrd();
88428 return 1;
88429 }
88430 }
88431- sys_unlink("/initrd.image");
88432+ sys_unlink((const char __force_user *)"/initrd.image");
88433 return 0;
88434 }
88435diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
88436index 8cb6db5..d729f50 100644
88437--- a/init/do_mounts_md.c
88438+++ b/init/do_mounts_md.c
88439@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
88440 partitioned ? "_d" : "", minor,
88441 md_setup_args[ent].device_names);
88442
88443- fd = sys_open(name, 0, 0);
88444+ fd = sys_open((char __force_user *)name, 0, 0);
88445 if (fd < 0) {
88446 printk(KERN_ERR "md: open failed - cannot start "
88447 "array %s\n", name);
88448@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
88449 * array without it
88450 */
88451 sys_close(fd);
88452- fd = sys_open(name, 0, 0);
88453+ fd = sys_open((char __force_user *)name, 0, 0);
88454 sys_ioctl(fd, BLKRRPART, 0);
88455 }
88456 sys_close(fd);
88457@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
88458
88459 wait_for_device_probe();
88460
88461- fd = sys_open("/dev/md0", 0, 0);
88462+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
88463 if (fd >= 0) {
88464 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
88465 sys_close(fd);
88466diff --git a/init/init_task.c b/init/init_task.c
88467index ba0a7f36..2bcf1d5 100644
88468--- a/init/init_task.c
88469+++ b/init/init_task.c
88470@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
88471 * Initial thread structure. Alignment of this is handled by a special
88472 * linker map entry.
88473 */
88474+#ifdef CONFIG_X86
88475+union thread_union init_thread_union __init_task_data;
88476+#else
88477 union thread_union init_thread_union __init_task_data =
88478 { INIT_THREAD_INFO(init_task) };
88479+#endif
88480diff --git a/init/initramfs.c b/init/initramfs.c
88481index bece48c..e911bd8 100644
88482--- a/init/initramfs.c
88483+++ b/init/initramfs.c
88484@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
88485
88486 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
88487 while (count) {
88488- ssize_t rv = sys_write(fd, p, count);
88489+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
88490
88491 if (rv < 0) {
88492 if (rv == -EINTR || rv == -EAGAIN)
88493@@ -107,7 +107,7 @@ static void __init free_hash(void)
88494 }
88495 }
88496
88497-static long __init do_utime(char *filename, time_t mtime)
88498+static long __init do_utime(char __force_user *filename, time_t mtime)
88499 {
88500 struct timespec t[2];
88501
88502@@ -142,7 +142,7 @@ static void __init dir_utime(void)
88503 struct dir_entry *de, *tmp;
88504 list_for_each_entry_safe(de, tmp, &dir_list, list) {
88505 list_del(&de->list);
88506- do_utime(de->name, de->mtime);
88507+ do_utime((char __force_user *)de->name, de->mtime);
88508 kfree(de->name);
88509 kfree(de);
88510 }
88511@@ -304,7 +304,7 @@ static int __init maybe_link(void)
88512 if (nlink >= 2) {
88513 char *old = find_link(major, minor, ino, mode, collected);
88514 if (old)
88515- return (sys_link(old, collected) < 0) ? -1 : 1;
88516+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
88517 }
88518 return 0;
88519 }
88520@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t mode)
88521 {
88522 struct stat st;
88523
88524- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
88525+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
88526 if (S_ISDIR(st.st_mode))
88527- sys_rmdir(path);
88528+ sys_rmdir((char __force_user *)path);
88529 else
88530- sys_unlink(path);
88531+ sys_unlink((char __force_user *)path);
88532 }
88533 }
88534
88535@@ -338,7 +338,7 @@ static int __init do_name(void)
88536 int openflags = O_WRONLY|O_CREAT;
88537 if (ml != 1)
88538 openflags |= O_TRUNC;
88539- wfd = sys_open(collected, openflags, mode);
88540+ wfd = sys_open((char __force_user *)collected, openflags, mode);
88541
88542 if (wfd >= 0) {
88543 sys_fchown(wfd, uid, gid);
88544@@ -350,17 +350,17 @@ static int __init do_name(void)
88545 }
88546 }
88547 } else if (S_ISDIR(mode)) {
88548- sys_mkdir(collected, mode);
88549- sys_chown(collected, uid, gid);
88550- sys_chmod(collected, mode);
88551+ sys_mkdir((char __force_user *)collected, mode);
88552+ sys_chown((char __force_user *)collected, uid, gid);
88553+ sys_chmod((char __force_user *)collected, mode);
88554 dir_add(collected, mtime);
88555 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
88556 S_ISFIFO(mode) || S_ISSOCK(mode)) {
88557 if (maybe_link() == 0) {
88558- sys_mknod(collected, mode, rdev);
88559- sys_chown(collected, uid, gid);
88560- sys_chmod(collected, mode);
88561- do_utime(collected, mtime);
88562+ sys_mknod((char __force_user *)collected, mode, rdev);
88563+ sys_chown((char __force_user *)collected, uid, gid);
88564+ sys_chmod((char __force_user *)collected, mode);
88565+ do_utime((char __force_user *)collected, mtime);
88566 }
88567 }
88568 return 0;
88569@@ -372,7 +372,7 @@ static int __init do_copy(void)
88570 if (xwrite(wfd, victim, body_len) != body_len)
88571 error("write error");
88572 sys_close(wfd);
88573- do_utime(vcollected, mtime);
88574+ do_utime((char __force_user *)vcollected, mtime);
88575 kfree(vcollected);
88576 eat(body_len);
88577 state = SkipIt;
88578@@ -390,9 +390,9 @@ static int __init do_symlink(void)
88579 {
88580 collected[N_ALIGN(name_len) + body_len] = '\0';
88581 clean_path(collected, 0);
88582- sys_symlink(collected + N_ALIGN(name_len), collected);
88583- sys_lchown(collected, uid, gid);
88584- do_utime(collected, mtime);
88585+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
88586+ sys_lchown((char __force_user *)collected, uid, gid);
88587+ do_utime((char __force_user *)collected, mtime);
88588 state = SkipIt;
88589 next_state = Reset;
88590 return 0;
88591diff --git a/init/main.c b/init/main.c
88592index bb1aed9..64f9745 100644
88593--- a/init/main.c
88594+++ b/init/main.c
88595@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
88596 static inline void mark_rodata_ro(void) { }
88597 #endif
88598
88599+extern void grsecurity_init(void);
88600+
88601 /*
88602 * Debug helper: via this flag we know that we are in 'early bootup code'
88603 * where only the boot processor is running with IRQ disabled. This means
88604@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
88605
88606 __setup("reset_devices", set_reset_devices);
88607
88608+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
88609+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
88610+static int __init setup_grsec_proc_gid(char *str)
88611+{
88612+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
88613+ return 1;
88614+}
88615+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
88616+#endif
88617+
88618+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
88619+unsigned long pax_user_shadow_base __read_only;
88620+EXPORT_SYMBOL(pax_user_shadow_base);
88621+extern char pax_enter_kernel_user[];
88622+extern char pax_exit_kernel_user[];
88623+#endif
88624+
88625+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
88626+static int __init setup_pax_nouderef(char *str)
88627+{
88628+#ifdef CONFIG_X86_32
88629+ unsigned int cpu;
88630+ struct desc_struct *gdt;
88631+
88632+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
88633+ gdt = get_cpu_gdt_table(cpu);
88634+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
88635+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
88636+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
88637+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
88638+ }
88639+ loadsegment(ds, __KERNEL_DS);
88640+ loadsegment(es, __KERNEL_DS);
88641+ loadsegment(ss, __KERNEL_DS);
88642+#else
88643+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
88644+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
88645+ clone_pgd_mask = ~(pgdval_t)0UL;
88646+ pax_user_shadow_base = 0UL;
88647+ setup_clear_cpu_cap(X86_FEATURE_PCID);
88648+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
88649+#endif
88650+
88651+ return 0;
88652+}
88653+early_param("pax_nouderef", setup_pax_nouderef);
88654+
88655+#ifdef CONFIG_X86_64
88656+static int __init setup_pax_weakuderef(char *str)
88657+{
88658+ if (clone_pgd_mask != ~(pgdval_t)0UL)
88659+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
88660+ return 1;
88661+}
88662+__setup("pax_weakuderef", setup_pax_weakuderef);
88663+#endif
88664+#endif
88665+
88666+#ifdef CONFIG_PAX_SOFTMODE
88667+int pax_softmode;
88668+
88669+static int __init setup_pax_softmode(char *str)
88670+{
88671+ get_option(&str, &pax_softmode);
88672+ return 1;
88673+}
88674+__setup("pax_softmode=", setup_pax_softmode);
88675+#endif
88676+
88677 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
88678 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
88679 static const char *panic_later, *panic_param;
88680@@ -728,7 +799,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
88681 struct blacklist_entry *entry;
88682 char *fn_name;
88683
88684- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
88685+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
88686 if (!fn_name)
88687 return false;
88688
88689@@ -780,7 +851,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
88690 {
88691 int count = preempt_count();
88692 int ret;
88693- char msgbuf[64];
88694+ const char *msg1 = "", *msg2 = "";
88695
88696 if (initcall_blacklisted(fn))
88697 return -EPERM;
88698@@ -790,18 +861,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
88699 else
88700 ret = fn();
88701
88702- msgbuf[0] = 0;
88703-
88704 if (preempt_count() != count) {
88705- sprintf(msgbuf, "preemption imbalance ");
88706+ msg1 = " preemption imbalance";
88707 preempt_count_set(count);
88708 }
88709 if (irqs_disabled()) {
88710- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
88711+ msg2 = " disabled interrupts";
88712 local_irq_enable();
88713 }
88714- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
88715+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
88716
88717+ add_latent_entropy();
88718 return ret;
88719 }
88720
88721@@ -908,8 +978,8 @@ static int run_init_process(const char *init_filename)
88722 {
88723 argv_init[0] = init_filename;
88724 return do_execve(getname_kernel(init_filename),
88725- (const char __user *const __user *)argv_init,
88726- (const char __user *const __user *)envp_init);
88727+ (const char __user *const __force_user *)argv_init,
88728+ (const char __user *const __force_user *)envp_init);
88729 }
88730
88731 static int try_to_run_init_process(const char *init_filename)
88732@@ -926,6 +996,10 @@ static int try_to_run_init_process(const char *init_filename)
88733 return ret;
88734 }
88735
88736+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88737+extern int gr_init_ran;
88738+#endif
88739+
88740 static noinline void __init kernel_init_freeable(void);
88741
88742 static int __ref kernel_init(void *unused)
88743@@ -950,6 +1024,11 @@ static int __ref kernel_init(void *unused)
88744 ramdisk_execute_command, ret);
88745 }
88746
88747+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88748+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
88749+ gr_init_ran = 1;
88750+#endif
88751+
88752 /*
88753 * We try each of these until one succeeds.
88754 *
88755@@ -1005,7 +1084,7 @@ static noinline void __init kernel_init_freeable(void)
88756 do_basic_setup();
88757
88758 /* Open the /dev/console on the rootfs, this should never fail */
88759- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88760+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88761 pr_err("Warning: unable to open an initial console.\n");
88762
88763 (void) sys_dup(0);
88764@@ -1018,11 +1097,13 @@ static noinline void __init kernel_init_freeable(void)
88765 if (!ramdisk_execute_command)
88766 ramdisk_execute_command = "/init";
88767
88768- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88769+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88770 ramdisk_execute_command = NULL;
88771 prepare_namespace();
88772 }
88773
88774+ grsecurity_init();
88775+
88776 /*
88777 * Ok, we have completed the initial bootup, and
88778 * we're essentially up and running. Get rid of the
88779diff --git a/ipc/compat.c b/ipc/compat.c
88780index b5ef4f7..ff31d87 100644
88781--- a/ipc/compat.c
88782+++ b/ipc/compat.c
88783@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88784 COMPAT_SHMLBA);
88785 if (err < 0)
88786 return err;
88787- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88788+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88789 }
88790 case SHMDT:
88791 return sys_shmdt(compat_ptr(ptr));
88792diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88793index c3f0326..d4e0579 100644
88794--- a/ipc/ipc_sysctl.c
88795+++ b/ipc/ipc_sysctl.c
88796@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88797 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88798 void __user *buffer, size_t *lenp, loff_t *ppos)
88799 {
88800- struct ctl_table ipc_table;
88801+ ctl_table_no_const ipc_table;
88802
88803 memcpy(&ipc_table, table, sizeof(ipc_table));
88804 ipc_table.data = get_ipc(table);
88805@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88806 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88807 void __user *buffer, size_t *lenp, loff_t *ppos)
88808 {
88809- struct ctl_table ipc_table;
88810+ ctl_table_no_const ipc_table;
88811
88812 memcpy(&ipc_table, table, sizeof(ipc_table));
88813 ipc_table.data = get_ipc(table);
88814@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88815 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88816 void __user *buffer, size_t *lenp, loff_t *ppos)
88817 {
88818- struct ctl_table ipc_table;
88819+ ctl_table_no_const ipc_table;
88820 size_t lenp_bef = *lenp;
88821 int rc;
88822
88823@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88824 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88825 void __user *buffer, size_t *lenp, loff_t *ppos)
88826 {
88827- struct ctl_table ipc_table;
88828+ ctl_table_no_const ipc_table;
88829 memcpy(&ipc_table, table, sizeof(ipc_table));
88830 ipc_table.data = get_ipc(table);
88831
88832@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
88833 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
88834 void __user *buffer, size_t *lenp, loff_t *ppos)
88835 {
88836- struct ctl_table ipc_table;
88837+ ctl_table_no_const ipc_table;
88838 size_t lenp_bef = *lenp;
88839 int oldval;
88840 int rc;
88841diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88842index 68d4e95..1477ded 100644
88843--- a/ipc/mq_sysctl.c
88844+++ b/ipc/mq_sysctl.c
88845@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88846 static int proc_mq_dointvec(struct ctl_table *table, int write,
88847 void __user *buffer, size_t *lenp, loff_t *ppos)
88848 {
88849- struct ctl_table mq_table;
88850+ ctl_table_no_const mq_table;
88851 memcpy(&mq_table, table, sizeof(mq_table));
88852 mq_table.data = get_mq(table);
88853
88854@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88855 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88856 void __user *buffer, size_t *lenp, loff_t *ppos)
88857 {
88858- struct ctl_table mq_table;
88859+ ctl_table_no_const mq_table;
88860 memcpy(&mq_table, table, sizeof(mq_table));
88861 mq_table.data = get_mq(table);
88862
88863diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88864index 4fcf39a..d3cc2ec 100644
88865--- a/ipc/mqueue.c
88866+++ b/ipc/mqueue.c
88867@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88868 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88869 info->attr.mq_msgsize);
88870
88871+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88872 spin_lock(&mq_lock);
88873 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88874 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88875diff --git a/ipc/shm.c b/ipc/shm.c
88876index 7fc9f9f..95e201f 100644
88877--- a/ipc/shm.c
88878+++ b/ipc/shm.c
88879@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88880 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88881 #endif
88882
88883+#ifdef CONFIG_GRKERNSEC
88884+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88885+ const u64 shm_createtime, const kuid_t cuid,
88886+ const int shmid);
88887+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88888+ const u64 shm_createtime);
88889+#endif
88890+
88891 void shm_init_ns(struct ipc_namespace *ns)
88892 {
88893 ns->shm_ctlmax = SHMMAX;
88894@@ -559,6 +567,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88895 shp->shm_lprid = 0;
88896 shp->shm_atim = shp->shm_dtim = 0;
88897 shp->shm_ctim = get_seconds();
88898+#ifdef CONFIG_GRKERNSEC
88899+ shp->shm_createtime = ktime_get_ns();
88900+#endif
88901 shp->shm_segsz = size;
88902 shp->shm_nattch = 0;
88903 shp->shm_file = file;
88904@@ -1095,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88905 f_mode = FMODE_READ | FMODE_WRITE;
88906 }
88907 if (shmflg & SHM_EXEC) {
88908+
88909+#ifdef CONFIG_PAX_MPROTECT
88910+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88911+ goto out;
88912+#endif
88913+
88914 prot |= PROT_EXEC;
88915 acc_mode |= S_IXUGO;
88916 }
88917@@ -1119,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88918 if (err)
88919 goto out_unlock;
88920
88921+#ifdef CONFIG_GRKERNSEC
88922+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88923+ shp->shm_perm.cuid, shmid) ||
88924+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88925+ err = -EACCES;
88926+ goto out_unlock;
88927+ }
88928+#endif
88929+
88930 ipc_lock_object(&shp->shm_perm);
88931
88932 /* check if shm_destroy() is tearing down shp */
88933@@ -1131,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88934 path = shp->shm_file->f_path;
88935 path_get(&path);
88936 shp->shm_nattch++;
88937+#ifdef CONFIG_GRKERNSEC
88938+ shp->shm_lapid = current->pid;
88939+#endif
88940 size = i_size_read(path.dentry->d_inode);
88941 ipc_unlock_object(&shp->shm_perm);
88942 rcu_read_unlock();
88943diff --git a/ipc/util.c b/ipc/util.c
88944index 27d74e6..8be0be2 100644
88945--- a/ipc/util.c
88946+++ b/ipc/util.c
88947@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88948 int (*show)(struct seq_file *, void *);
88949 };
88950
88951+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88952+
88953 static void ipc_memory_notifier(struct work_struct *work)
88954 {
88955 ipcns_notify(IPCNS_MEMCHANGED);
88956@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88957 granted_mode >>= 6;
88958 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88959 granted_mode >>= 3;
88960+
88961+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88962+ return -1;
88963+
88964 /* is there some bit set in requested_mode but not in granted_mode? */
88965 if ((requested_mode & ~granted_mode & 0007) &&
88966 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88967diff --git a/kernel/audit.c b/kernel/audit.c
88968index ba2ff5a..c6c0deb 100644
88969--- a/kernel/audit.c
88970+++ b/kernel/audit.c
88971@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88972 3) suppressed due to audit_rate_limit
88973 4) suppressed due to audit_backlog_limit
88974 */
88975-static atomic_t audit_lost = ATOMIC_INIT(0);
88976+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88977
88978 /* The netlink socket. */
88979 static struct sock *audit_sock;
88980@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88981 unsigned long now;
88982 int print;
88983
88984- atomic_inc(&audit_lost);
88985+ atomic_inc_unchecked(&audit_lost);
88986
88987 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88988
88989@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88990 if (print) {
88991 if (printk_ratelimit())
88992 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88993- atomic_read(&audit_lost),
88994+ atomic_read_unchecked(&audit_lost),
88995 audit_rate_limit,
88996 audit_backlog_limit);
88997 audit_panic(message);
88998@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88999 s.pid = audit_pid;
89000 s.rate_limit = audit_rate_limit;
89001 s.backlog_limit = audit_backlog_limit;
89002- s.lost = atomic_read(&audit_lost);
89003+ s.lost = atomic_read_unchecked(&audit_lost);
89004 s.backlog = skb_queue_len(&audit_skb_queue);
89005 s.version = AUDIT_VERSION_LATEST;
89006 s.backlog_wait_time = audit_backlog_wait_time;
89007diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89008index 21eae3c..66db239 100644
89009--- a/kernel/auditsc.c
89010+++ b/kernel/auditsc.c
89011@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89012 }
89013
89014 /* global counter which is incremented every time something logs in */
89015-static atomic_t session_id = ATOMIC_INIT(0);
89016+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89017
89018 static int audit_set_loginuid_perm(kuid_t loginuid)
89019 {
89020@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
89021
89022 /* are we setting or clearing? */
89023 if (uid_valid(loginuid))
89024- sessionid = (unsigned int)atomic_inc_return(&session_id);
89025+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89026
89027 task->sessionid = sessionid;
89028 task->loginuid = loginuid;
89029diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
89030index 7f0dbcb..b54bb2c 100644
89031--- a/kernel/bpf/core.c
89032+++ b/kernel/bpf/core.c
89033@@ -22,6 +22,7 @@
89034 */
89035 #include <linux/filter.h>
89036 #include <linux/skbuff.h>
89037+#include <linux/vmalloc.h>
89038 #include <asm/unaligned.h>
89039
89040 /* Registers */
89041@@ -63,6 +64,67 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
89042 return NULL;
89043 }
89044
89045+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
89046+{
89047+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
89048+ gfp_extra_flags;
89049+ struct bpf_work_struct *ws;
89050+ struct bpf_prog *fp;
89051+
89052+ size = round_up(size, PAGE_SIZE);
89053+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
89054+ if (fp == NULL)
89055+ return NULL;
89056+
89057+ ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
89058+ if (ws == NULL) {
89059+ vfree(fp);
89060+ return NULL;
89061+ }
89062+
89063+ fp->pages = size / PAGE_SIZE;
89064+ fp->work = ws;
89065+
89066+ return fp;
89067+}
89068+EXPORT_SYMBOL_GPL(bpf_prog_alloc);
89069+
89070+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
89071+ gfp_t gfp_extra_flags)
89072+{
89073+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
89074+ gfp_extra_flags;
89075+ struct bpf_prog *fp;
89076+
89077+ BUG_ON(fp_old == NULL);
89078+
89079+ size = round_up(size, PAGE_SIZE);
89080+ if (size <= fp_old->pages * PAGE_SIZE)
89081+ return fp_old;
89082+
89083+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
89084+ if (fp != NULL) {
89085+ memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
89086+ fp->pages = size / PAGE_SIZE;
89087+
89088+ /* We keep fp->work from fp_old around in the new
89089+ * reallocated structure.
89090+ */
89091+ fp_old->work = NULL;
89092+ __bpf_prog_free(fp_old);
89093+ }
89094+
89095+ return fp;
89096+}
89097+EXPORT_SYMBOL_GPL(bpf_prog_realloc);
89098+
89099+void __bpf_prog_free(struct bpf_prog *fp)
89100+{
89101+ kfree(fp->work);
89102+ vfree(fp);
89103+}
89104+EXPORT_SYMBOL_GPL(__bpf_prog_free);
89105+
89106 /* Base function for offset calculation. Needs to go into .text section,
89107 * therefore keeping it non-static as well; will also be used by JITs
89108 * anyway later on, so do not let the compiler omit it.
89109@@ -523,12 +585,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
89110
89111 /* Probe if internal BPF can be JITed */
89112 bpf_int_jit_compile(fp);
89113+ /* Lock whole bpf_prog as read-only */
89114+ bpf_prog_lock_ro(fp);
89115 }
89116 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
89117
89118-/* free internal BPF program */
89119+static void bpf_prog_free_deferred(struct work_struct *work)
89120+{
89121+ struct bpf_work_struct *ws;
89122+
89123+ ws = container_of(work, struct bpf_work_struct, work);
89124+ bpf_jit_free(ws->prog);
89125+}
89126+
89127+/* Free internal BPF program */
89128 void bpf_prog_free(struct bpf_prog *fp)
89129 {
89130- bpf_jit_free(fp);
89131+ struct bpf_work_struct *ws = fp->work;
89132+
89133+ INIT_WORK(&ws->work, bpf_prog_free_deferred);
89134+ ws->prog = fp;
89135+ schedule_work(&ws->work);
89136 }
89137 EXPORT_SYMBOL_GPL(bpf_prog_free);
89138diff --git a/kernel/capability.c b/kernel/capability.c
89139index 989f5bf..d317ca0 100644
89140--- a/kernel/capability.c
89141+++ b/kernel/capability.c
89142@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89143 * before modification is attempted and the application
89144 * fails.
89145 */
89146+ if (tocopy > ARRAY_SIZE(kdata))
89147+ return -EFAULT;
89148+
89149 if (copy_to_user(dataptr, kdata, tocopy
89150 * sizeof(struct __user_cap_data_struct))) {
89151 return -EFAULT;
89152@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
89153 int ret;
89154
89155 rcu_read_lock();
89156- ret = security_capable(__task_cred(t), ns, cap);
89157+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89158+ gr_task_is_capable(t, __task_cred(t), cap);
89159 rcu_read_unlock();
89160
89161- return (ret == 0);
89162+ return ret;
89163 }
89164
89165 /**
89166@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89167 int ret;
89168
89169 rcu_read_lock();
89170- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89171+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89172 rcu_read_unlock();
89173
89174- return (ret == 0);
89175+ return ret;
89176 }
89177
89178 /**
89179@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89180 BUG();
89181 }
89182
89183- if (security_capable(current_cred(), ns, cap) == 0) {
89184+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89185 current->flags |= PF_SUPERPRIV;
89186 return true;
89187 }
89188@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89189 }
89190 EXPORT_SYMBOL(ns_capable);
89191
89192+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89193+{
89194+ if (unlikely(!cap_valid(cap))) {
89195+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89196+ BUG();
89197+ }
89198+
89199+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89200+ current->flags |= PF_SUPERPRIV;
89201+ return true;
89202+ }
89203+ return false;
89204+}
89205+EXPORT_SYMBOL(ns_capable_nolog);
89206+
89207 /**
89208 * file_ns_capable - Determine if the file's opener had a capability in effect
89209 * @file: The file we want to check
89210@@ -427,6 +446,12 @@ bool capable(int cap)
89211 }
89212 EXPORT_SYMBOL(capable);
89213
89214+bool capable_nolog(int cap)
89215+{
89216+ return ns_capable_nolog(&init_user_ns, cap);
89217+}
89218+EXPORT_SYMBOL(capable_nolog);
89219+
89220 /**
89221 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89222 * @inode: The inode in question
89223@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89224 kgid_has_mapping(ns, inode->i_gid);
89225 }
89226 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89227+
89228+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89229+{
89230+ struct user_namespace *ns = current_user_ns();
89231+
89232+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89233+ kgid_has_mapping(ns, inode->i_gid);
89234+}
89235+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
89236diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89237index 3a73f99..4f29fea 100644
89238--- a/kernel/cgroup.c
89239+++ b/kernel/cgroup.c
89240@@ -5341,6 +5341,14 @@ static void cgroup_release_agent(struct work_struct *work)
89241 release_list);
89242 list_del_init(&cgrp->release_list);
89243 raw_spin_unlock(&release_list_lock);
89244+
89245+ /*
89246+ * don't bother calling call_usermodehelper if we haven't
89247+ * configured a binary to execute
89248+ */
89249+ if (cgrp->root->release_agent_path[0] == '\0')
89250+ goto continue_free;
89251+
89252 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
89253 if (!pathbuf)
89254 goto continue_free;
89255@@ -5539,7 +5547,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
89256 struct task_struct *task;
89257 int count = 0;
89258
89259- seq_printf(seq, "css_set %p\n", cset);
89260+ seq_printf(seq, "css_set %pK\n", cset);
89261
89262 list_for_each_entry(task, &cset->tasks, cg_list) {
89263 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
89264diff --git a/kernel/compat.c b/kernel/compat.c
89265index ebb3c36..1df606e 100644
89266--- a/kernel/compat.c
89267+++ b/kernel/compat.c
89268@@ -13,6 +13,7 @@
89269
89270 #include <linux/linkage.h>
89271 #include <linux/compat.h>
89272+#include <linux/module.h>
89273 #include <linux/errno.h>
89274 #include <linux/time.h>
89275 #include <linux/signal.h>
89276@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
89277 mm_segment_t oldfs;
89278 long ret;
89279
89280- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
89281+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
89282 oldfs = get_fs();
89283 set_fs(KERNEL_DS);
89284 ret = hrtimer_nanosleep_restart(restart);
89285@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
89286 oldfs = get_fs();
89287 set_fs(KERNEL_DS);
89288 ret = hrtimer_nanosleep(&tu,
89289- rmtp ? (struct timespec __user *)&rmt : NULL,
89290+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
89291 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
89292 set_fs(oldfs);
89293
89294@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
89295 mm_segment_t old_fs = get_fs();
89296
89297 set_fs(KERNEL_DS);
89298- ret = sys_sigpending((old_sigset_t __user *) &s);
89299+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
89300 set_fs(old_fs);
89301 if (ret == 0)
89302 ret = put_user(s, set);
89303@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
89304 mm_segment_t old_fs = get_fs();
89305
89306 set_fs(KERNEL_DS);
89307- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
89308+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
89309 set_fs(old_fs);
89310
89311 if (!ret) {
89312@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
89313 set_fs (KERNEL_DS);
89314 ret = sys_wait4(pid,
89315 (stat_addr ?
89316- (unsigned int __user *) &status : NULL),
89317- options, (struct rusage __user *) &r);
89318+ (unsigned int __force_user *) &status : NULL),
89319+ options, (struct rusage __force_user *) &r);
89320 set_fs (old_fs);
89321
89322 if (ret > 0) {
89323@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
89324 memset(&info, 0, sizeof(info));
89325
89326 set_fs(KERNEL_DS);
89327- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
89328- uru ? (struct rusage __user *)&ru : NULL);
89329+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
89330+ uru ? (struct rusage __force_user *)&ru : NULL);
89331 set_fs(old_fs);
89332
89333 if ((ret < 0) || (info.si_signo == 0))
89334@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
89335 oldfs = get_fs();
89336 set_fs(KERNEL_DS);
89337 err = sys_timer_settime(timer_id, flags,
89338- (struct itimerspec __user *) &newts,
89339- (struct itimerspec __user *) &oldts);
89340+ (struct itimerspec __force_user *) &newts,
89341+ (struct itimerspec __force_user *) &oldts);
89342 set_fs(oldfs);
89343 if (!err && old && put_compat_itimerspec(old, &oldts))
89344 return -EFAULT;
89345@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
89346 oldfs = get_fs();
89347 set_fs(KERNEL_DS);
89348 err = sys_timer_gettime(timer_id,
89349- (struct itimerspec __user *) &ts);
89350+ (struct itimerspec __force_user *) &ts);
89351 set_fs(oldfs);
89352 if (!err && put_compat_itimerspec(setting, &ts))
89353 return -EFAULT;
89354@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
89355 oldfs = get_fs();
89356 set_fs(KERNEL_DS);
89357 err = sys_clock_settime(which_clock,
89358- (struct timespec __user *) &ts);
89359+ (struct timespec __force_user *) &ts);
89360 set_fs(oldfs);
89361 return err;
89362 }
89363@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
89364 oldfs = get_fs();
89365 set_fs(KERNEL_DS);
89366 err = sys_clock_gettime(which_clock,
89367- (struct timespec __user *) &ts);
89368+ (struct timespec __force_user *) &ts);
89369 set_fs(oldfs);
89370 if (!err && compat_put_timespec(&ts, tp))
89371 return -EFAULT;
89372@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
89373
89374 oldfs = get_fs();
89375 set_fs(KERNEL_DS);
89376- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
89377+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
89378 set_fs(oldfs);
89379
89380 err = compat_put_timex(utp, &txc);
89381@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
89382 oldfs = get_fs();
89383 set_fs(KERNEL_DS);
89384 err = sys_clock_getres(which_clock,
89385- (struct timespec __user *) &ts);
89386+ (struct timespec __force_user *) &ts);
89387 set_fs(oldfs);
89388 if (!err && tp && compat_put_timespec(&ts, tp))
89389 return -EFAULT;
89390@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
89391 struct timespec tu;
89392 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
89393
89394- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
89395+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
89396 oldfs = get_fs();
89397 set_fs(KERNEL_DS);
89398 err = clock_nanosleep_restart(restart);
89399@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
89400 oldfs = get_fs();
89401 set_fs(KERNEL_DS);
89402 err = sys_clock_nanosleep(which_clock, flags,
89403- (struct timespec __user *) &in,
89404- (struct timespec __user *) &out);
89405+ (struct timespec __force_user *) &in,
89406+ (struct timespec __force_user *) &out);
89407 set_fs(oldfs);
89408
89409 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
89410@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
89411 mm_segment_t old_fs = get_fs();
89412
89413 set_fs(KERNEL_DS);
89414- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
89415+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
89416 set_fs(old_fs);
89417 if (compat_put_timespec(&t, interval))
89418 return -EFAULT;
89419diff --git a/kernel/configs.c b/kernel/configs.c
89420index c18b1f1..b9a0132 100644
89421--- a/kernel/configs.c
89422+++ b/kernel/configs.c
89423@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
89424 struct proc_dir_entry *entry;
89425
89426 /* create the current config file */
89427+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
89428+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
89429+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
89430+ &ikconfig_file_ops);
89431+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89432+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
89433+ &ikconfig_file_ops);
89434+#endif
89435+#else
89436 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
89437 &ikconfig_file_ops);
89438+#endif
89439+
89440 if (!entry)
89441 return -ENOMEM;
89442
89443diff --git a/kernel/cred.c b/kernel/cred.c
89444index e0573a4..26c0fd3 100644
89445--- a/kernel/cred.c
89446+++ b/kernel/cred.c
89447@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
89448 validate_creds(cred);
89449 alter_cred_subscribers(cred, -1);
89450 put_cred(cred);
89451+
89452+#ifdef CONFIG_GRKERNSEC_SETXID
89453+ cred = (struct cred *) tsk->delayed_cred;
89454+ if (cred != NULL) {
89455+ tsk->delayed_cred = NULL;
89456+ validate_creds(cred);
89457+ alter_cred_subscribers(cred, -1);
89458+ put_cred(cred);
89459+ }
89460+#endif
89461 }
89462
89463 /**
89464@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
89465 * Always returns 0 thus allowing this function to be tail-called at the end
89466 * of, say, sys_setgid().
89467 */
89468-int commit_creds(struct cred *new)
89469+static int __commit_creds(struct cred *new)
89470 {
89471 struct task_struct *task = current;
89472 const struct cred *old = task->real_cred;
89473@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
89474
89475 get_cred(new); /* we will require a ref for the subj creds too */
89476
89477+ gr_set_role_label(task, new->uid, new->gid);
89478+
89479 /* dumpability changes */
89480 if (!uid_eq(old->euid, new->euid) ||
89481 !gid_eq(old->egid, new->egid) ||
89482@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
89483 put_cred(old);
89484 return 0;
89485 }
89486+#ifdef CONFIG_GRKERNSEC_SETXID
89487+extern int set_user(struct cred *new);
89488+
89489+void gr_delayed_cred_worker(void)
89490+{
89491+ const struct cred *new = current->delayed_cred;
89492+ struct cred *ncred;
89493+
89494+ current->delayed_cred = NULL;
89495+
89496+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
89497+ // from doing get_cred on it when queueing this
89498+ put_cred(new);
89499+ return;
89500+ } else if (new == NULL)
89501+ return;
89502+
89503+ ncred = prepare_creds();
89504+ if (!ncred)
89505+ goto die;
89506+ // uids
89507+ ncred->uid = new->uid;
89508+ ncred->euid = new->euid;
89509+ ncred->suid = new->suid;
89510+ ncred->fsuid = new->fsuid;
89511+ // gids
89512+ ncred->gid = new->gid;
89513+ ncred->egid = new->egid;
89514+ ncred->sgid = new->sgid;
89515+ ncred->fsgid = new->fsgid;
89516+ // groups
89517+ set_groups(ncred, new->group_info);
89518+ // caps
89519+ ncred->securebits = new->securebits;
89520+ ncred->cap_inheritable = new->cap_inheritable;
89521+ ncred->cap_permitted = new->cap_permitted;
89522+ ncred->cap_effective = new->cap_effective;
89523+ ncred->cap_bset = new->cap_bset;
89524+
89525+ if (set_user(ncred)) {
89526+ abort_creds(ncred);
89527+ goto die;
89528+ }
89529+
89530+ // from doing get_cred on it when queueing this
89531+ put_cred(new);
89532+
89533+ __commit_creds(ncred);
89534+ return;
89535+die:
89536+ // from doing get_cred on it when queueing this
89537+ put_cred(new);
89538+ do_group_exit(SIGKILL);
89539+}
89540+#endif
89541+
89542+int commit_creds(struct cred *new)
89543+{
89544+#ifdef CONFIG_GRKERNSEC_SETXID
89545+ int ret;
89546+ int schedule_it = 0;
89547+ struct task_struct *t;
89548+ unsigned oldsecurebits = current_cred()->securebits;
89549+
89550+ /* we won't get called with tasklist_lock held for writing
89551+ and interrupts disabled as the cred struct in that case is
89552+ init_cred
89553+ */
89554+ if (grsec_enable_setxid && !current_is_single_threaded() &&
89555+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
89556+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
89557+ schedule_it = 1;
89558+ }
89559+ ret = __commit_creds(new);
89560+ if (schedule_it) {
89561+ rcu_read_lock();
89562+ read_lock(&tasklist_lock);
89563+ for (t = next_thread(current); t != current;
89564+ t = next_thread(t)) {
89565+ /* we'll check if the thread has uid 0 in
89566+ * the delayed worker routine
89567+ */
89568+ if (task_securebits(t) == oldsecurebits &&
89569+ t->delayed_cred == NULL) {
89570+ t->delayed_cred = get_cred(new);
89571+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
89572+ set_tsk_need_resched(t);
89573+ }
89574+ }
89575+ read_unlock(&tasklist_lock);
89576+ rcu_read_unlock();
89577+ }
89578+
89579+ return ret;
89580+#else
89581+ return __commit_creds(new);
89582+#endif
89583+}
89584+
89585 EXPORT_SYMBOL(commit_creds);
89586
89587 /**
89588diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
89589index 1adf62b..7736e06 100644
89590--- a/kernel/debug/debug_core.c
89591+++ b/kernel/debug/debug_core.c
89592@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
89593 */
89594 static atomic_t masters_in_kgdb;
89595 static atomic_t slaves_in_kgdb;
89596-static atomic_t kgdb_break_tasklet_var;
89597+static atomic_unchecked_t kgdb_break_tasklet_var;
89598 atomic_t kgdb_setting_breakpoint;
89599
89600 struct task_struct *kgdb_usethread;
89601@@ -134,7 +134,7 @@ int kgdb_single_step;
89602 static pid_t kgdb_sstep_pid;
89603
89604 /* to keep track of the CPU which is doing the single stepping*/
89605-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89606+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89607
89608 /*
89609 * If you are debugging a problem where roundup (the collection of
89610@@ -549,7 +549,7 @@ return_normal:
89611 * kernel will only try for the value of sstep_tries before
89612 * giving up and continuing on.
89613 */
89614- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
89615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
89616 (kgdb_info[cpu].task &&
89617 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
89618 atomic_set(&kgdb_active, -1);
89619@@ -647,8 +647,8 @@ cpu_master_loop:
89620 }
89621
89622 kgdb_restore:
89623- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
89624- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
89625+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
89626+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
89627 if (kgdb_info[sstep_cpu].task)
89628 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
89629 else
89630@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
89631 static void kgdb_tasklet_bpt(unsigned long ing)
89632 {
89633 kgdb_breakpoint();
89634- atomic_set(&kgdb_break_tasklet_var, 0);
89635+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
89636 }
89637
89638 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
89639
89640 void kgdb_schedule_breakpoint(void)
89641 {
89642- if (atomic_read(&kgdb_break_tasklet_var) ||
89643+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
89644 atomic_read(&kgdb_active) != -1 ||
89645 atomic_read(&kgdb_setting_breakpoint))
89646 return;
89647- atomic_inc(&kgdb_break_tasklet_var);
89648+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
89649 tasklet_schedule(&kgdb_tasklet_breakpoint);
89650 }
89651 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
89652diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
89653index 379650b..30c5180 100644
89654--- a/kernel/debug/kdb/kdb_main.c
89655+++ b/kernel/debug/kdb/kdb_main.c
89656@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
89657 continue;
89658
89659 kdb_printf("%-20s%8u 0x%p ", mod->name,
89660- mod->core_size, (void *)mod);
89661+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
89662 #ifdef CONFIG_MODULE_UNLOAD
89663 kdb_printf("%4ld ", module_refcount(mod));
89664 #endif
89665@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
89666 kdb_printf(" (Loading)");
89667 else
89668 kdb_printf(" (Live)");
89669- kdb_printf(" 0x%p", mod->module_core);
89670+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
89671
89672 #ifdef CONFIG_MODULE_UNLOAD
89673 {
89674diff --git a/kernel/events/core.c b/kernel/events/core.c
89675index 963bf13..a78dd3e 100644
89676--- a/kernel/events/core.c
89677+++ b/kernel/events/core.c
89678@@ -161,8 +161,15 @@ static struct srcu_struct pmus_srcu;
89679 * 0 - disallow raw tracepoint access for unpriv
89680 * 1 - disallow cpu events for unpriv
89681 * 2 - disallow kernel profiling for unpriv
89682+ * 3 - disallow all unpriv perf event use
89683 */
89684-int sysctl_perf_event_paranoid __read_mostly = 1;
89685+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89686+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
89687+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
89688+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
89689+#else
89690+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
89691+#endif
89692
89693 /* Minimum for 512 kiB + 1 user control page */
89694 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
89695@@ -188,7 +195,7 @@ void update_perf_cpu_limits(void)
89696
89697 tmp *= sysctl_perf_cpu_time_max_percent;
89698 do_div(tmp, 100);
89699- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
89700+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
89701 }
89702
89703 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
89704@@ -294,7 +301,7 @@ void perf_sample_event_took(u64 sample_len_ns)
89705 }
89706 }
89707
89708-static atomic64_t perf_event_id;
89709+static atomic64_unchecked_t perf_event_id;
89710
89711 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
89712 enum event_type_t event_type);
89713@@ -3034,7 +3041,7 @@ static void __perf_event_read(void *info)
89714
89715 static inline u64 perf_event_count(struct perf_event *event)
89716 {
89717- return local64_read(&event->count) + atomic64_read(&event->child_count);
89718+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
89719 }
89720
89721 static u64 perf_event_read(struct perf_event *event)
89722@@ -3410,9 +3417,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
89723 mutex_lock(&event->child_mutex);
89724 total += perf_event_read(event);
89725 *enabled += event->total_time_enabled +
89726- atomic64_read(&event->child_total_time_enabled);
89727+ atomic64_read_unchecked(&event->child_total_time_enabled);
89728 *running += event->total_time_running +
89729- atomic64_read(&event->child_total_time_running);
89730+ atomic64_read_unchecked(&event->child_total_time_running);
89731
89732 list_for_each_entry(child, &event->child_list, child_list) {
89733 total += perf_event_read(child);
89734@@ -3861,10 +3868,10 @@ void perf_event_update_userpage(struct perf_event *event)
89735 userpg->offset -= local64_read(&event->hw.prev_count);
89736
89737 userpg->time_enabled = enabled +
89738- atomic64_read(&event->child_total_time_enabled);
89739+ atomic64_read_unchecked(&event->child_total_time_enabled);
89740
89741 userpg->time_running = running +
89742- atomic64_read(&event->child_total_time_running);
89743+ atomic64_read_unchecked(&event->child_total_time_running);
89744
89745 arch_perf_update_userpage(userpg, now);
89746
89747@@ -4428,7 +4435,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
89748
89749 /* Data. */
89750 sp = perf_user_stack_pointer(regs);
89751- rem = __output_copy_user(handle, (void *) sp, dump_size);
89752+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
89753 dyn_size = dump_size - rem;
89754
89755 perf_output_skip(handle, rem);
89756@@ -4519,11 +4526,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
89757 values[n++] = perf_event_count(event);
89758 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
89759 values[n++] = enabled +
89760- atomic64_read(&event->child_total_time_enabled);
89761+ atomic64_read_unchecked(&event->child_total_time_enabled);
89762 }
89763 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89764 values[n++] = running +
89765- atomic64_read(&event->child_total_time_running);
89766+ atomic64_read_unchecked(&event->child_total_time_running);
89767 }
89768 if (read_format & PERF_FORMAT_ID)
89769 values[n++] = primary_event_id(event);
89770@@ -6838,7 +6845,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89771 event->parent = parent_event;
89772
89773 event->ns = get_pid_ns(task_active_pid_ns(current));
89774- event->id = atomic64_inc_return(&perf_event_id);
89775+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89776
89777 event->state = PERF_EVENT_STATE_INACTIVE;
89778
89779@@ -7117,6 +7124,11 @@ SYSCALL_DEFINE5(perf_event_open,
89780 if (flags & ~PERF_FLAG_ALL)
89781 return -EINVAL;
89782
89783+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89784+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89785+ return -EACCES;
89786+#endif
89787+
89788 err = perf_copy_attr(attr_uptr, &attr);
89789 if (err)
89790 return err;
89791@@ -7469,10 +7481,10 @@ static void sync_child_event(struct perf_event *child_event,
89792 /*
89793 * Add back the child's count to the parent's count:
89794 */
89795- atomic64_add(child_val, &parent_event->child_count);
89796- atomic64_add(child_event->total_time_enabled,
89797+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89798+ atomic64_add_unchecked(child_event->total_time_enabled,
89799 &parent_event->child_total_time_enabled);
89800- atomic64_add(child_event->total_time_running,
89801+ atomic64_add_unchecked(child_event->total_time_running,
89802 &parent_event->child_total_time_running);
89803
89804 /*
89805diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89806index 569b2187..19940d9 100644
89807--- a/kernel/events/internal.h
89808+++ b/kernel/events/internal.h
89809@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89810 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89811 }
89812
89813-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89814+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89815 static inline unsigned long \
89816 func_name(struct perf_output_handle *handle, \
89817- const void *buf, unsigned long len) \
89818+ const void user *buf, unsigned long len) \
89819 { \
89820 unsigned long size, written; \
89821 \
89822@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89823 return 0;
89824 }
89825
89826-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89827+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89828
89829 static inline unsigned long
89830 memcpy_skip(void *dst, const void *src, unsigned long n)
89831@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89832 return 0;
89833 }
89834
89835-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89836+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89837
89838 #ifndef arch_perf_out_copy_user
89839 #define arch_perf_out_copy_user arch_perf_out_copy_user
89840@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89841 }
89842 #endif
89843
89844-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89845+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89846
89847 /* Callchain handling */
89848 extern struct perf_callchain_entry *
89849diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89850index 1d0af8a..9913530 100644
89851--- a/kernel/events/uprobes.c
89852+++ b/kernel/events/uprobes.c
89853@@ -1671,7 +1671,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89854 {
89855 struct page *page;
89856 uprobe_opcode_t opcode;
89857- int result;
89858+ long result;
89859
89860 pagefault_disable();
89861 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89862diff --git a/kernel/exit.c b/kernel/exit.c
89863index 32c58f7..9eb6907 100644
89864--- a/kernel/exit.c
89865+++ b/kernel/exit.c
89866@@ -173,6 +173,10 @@ void release_task(struct task_struct *p)
89867 struct task_struct *leader;
89868 int zap_leader;
89869 repeat:
89870+#ifdef CONFIG_NET
89871+ gr_del_task_from_ip_table(p);
89872+#endif
89873+
89874 /* don't need to get the RCU readlock here - the process is dead and
89875 * can't be modifying its own credentials. But shut RCU-lockdep up */
89876 rcu_read_lock();
89877@@ -668,6 +672,8 @@ void do_exit(long code)
89878 struct task_struct *tsk = current;
89879 int group_dead;
89880
89881+ set_fs(USER_DS);
89882+
89883 profile_task_exit(tsk);
89884
89885 WARN_ON(blk_needs_flush_plug(tsk));
89886@@ -684,7 +690,6 @@ void do_exit(long code)
89887 * mm_release()->clear_child_tid() from writing to a user-controlled
89888 * kernel address.
89889 */
89890- set_fs(USER_DS);
89891
89892 ptrace_event(PTRACE_EVENT_EXIT, code);
89893
89894@@ -742,6 +747,9 @@ void do_exit(long code)
89895 tsk->exit_code = code;
89896 taskstats_exit(tsk, group_dead);
89897
89898+ gr_acl_handle_psacct(tsk, code);
89899+ gr_acl_handle_exit();
89900+
89901 exit_mm(tsk);
89902
89903 if (group_dead)
89904@@ -859,7 +867,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89905 * Take down every thread in the group. This is called by fatal signals
89906 * as well as by sys_exit_group (below).
89907 */
89908-void
89909+__noreturn void
89910 do_group_exit(int exit_code)
89911 {
89912 struct signal_struct *sig = current->signal;
89913diff --git a/kernel/fork.c b/kernel/fork.c
89914index a91e47d..71c9064 100644
89915--- a/kernel/fork.c
89916+++ b/kernel/fork.c
89917@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
89918 # endif
89919 #endif
89920
89921+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89922+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89923+ int node, void **lowmem_stack)
89924+{
89925+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89926+ void *ret = NULL;
89927+ unsigned int i;
89928+
89929+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89930+ if (*lowmem_stack == NULL)
89931+ goto out;
89932+
89933+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89934+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89935+
89936+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89937+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89938+ if (ret == NULL) {
89939+ free_thread_info(*lowmem_stack);
89940+ *lowmem_stack = NULL;
89941+ }
89942+
89943+out:
89944+ return ret;
89945+}
89946+
89947+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89948+{
89949+ unmap_process_stacks(tsk);
89950+}
89951+#else
89952+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89953+ int node, void **lowmem_stack)
89954+{
89955+ return alloc_thread_info_node(tsk, node);
89956+}
89957+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89958+{
89959+ free_thread_info(ti);
89960+}
89961+#endif
89962+
89963 /* SLAB cache for signal_struct structures (tsk->signal) */
89964 static struct kmem_cache *signal_cachep;
89965
89966@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89967 /* SLAB cache for mm_struct structures (tsk->mm) */
89968 static struct kmem_cache *mm_cachep;
89969
89970-static void account_kernel_stack(struct thread_info *ti, int account)
89971+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89972 {
89973+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89974+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89975+#else
89976 struct zone *zone = page_zone(virt_to_page(ti));
89977+#endif
89978
89979 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89980 }
89981
89982 void free_task(struct task_struct *tsk)
89983 {
89984- account_kernel_stack(tsk->stack, -1);
89985+ account_kernel_stack(tsk, tsk->stack, -1);
89986 arch_release_thread_info(tsk->stack);
89987- free_thread_info(tsk->stack);
89988+ gr_free_thread_info(tsk, tsk->stack);
89989 rt_mutex_debug_task_free(tsk);
89990 ftrace_graph_exit_task(tsk);
89991 put_seccomp_filter(tsk);
89992@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89993 struct task_struct *tsk;
89994 struct thread_info *ti;
89995 unsigned long *stackend;
89996+ void *lowmem_stack;
89997 int node = tsk_fork_get_node(orig);
89998 int err;
89999
90000@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90001 if (!tsk)
90002 return NULL;
90003
90004- ti = alloc_thread_info_node(tsk, node);
90005+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90006 if (!ti)
90007 goto free_tsk;
90008
90009@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90010 goto free_ti;
90011
90012 tsk->stack = ti;
90013+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90014+ tsk->lowmem_stack = lowmem_stack;
90015+#endif
90016 #ifdef CONFIG_SECCOMP
90017 /*
90018 * We must handle setting up seccomp filters once we're under
90019@@ -332,7 +382,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90020 *stackend = STACK_END_MAGIC; /* for overflow detection */
90021
90022 #ifdef CONFIG_CC_STACKPROTECTOR
90023- tsk->stack_canary = get_random_int();
90024+ tsk->stack_canary = pax_get_random_long();
90025 #endif
90026
90027 /*
90028@@ -346,24 +396,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90029 tsk->splice_pipe = NULL;
90030 tsk->task_frag.page = NULL;
90031
90032- account_kernel_stack(ti, 1);
90033+ account_kernel_stack(tsk, ti, 1);
90034
90035 return tsk;
90036
90037 free_ti:
90038- free_thread_info(ti);
90039+ gr_free_thread_info(tsk, ti);
90040 free_tsk:
90041 free_task_struct(tsk);
90042 return NULL;
90043 }
90044
90045 #ifdef CONFIG_MMU
90046-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90047+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90048+{
90049+ struct vm_area_struct *tmp;
90050+ unsigned long charge;
90051+ struct file *file;
90052+ int retval;
90053+
90054+ charge = 0;
90055+ if (mpnt->vm_flags & VM_ACCOUNT) {
90056+ unsigned long len = vma_pages(mpnt);
90057+
90058+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90059+ goto fail_nomem;
90060+ charge = len;
90061+ }
90062+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90063+ if (!tmp)
90064+ goto fail_nomem;
90065+ *tmp = *mpnt;
90066+ tmp->vm_mm = mm;
90067+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90068+ retval = vma_dup_policy(mpnt, tmp);
90069+ if (retval)
90070+ goto fail_nomem_policy;
90071+ if (anon_vma_fork(tmp, mpnt))
90072+ goto fail_nomem_anon_vma_fork;
90073+ tmp->vm_flags &= ~VM_LOCKED;
90074+ tmp->vm_next = tmp->vm_prev = NULL;
90075+ tmp->vm_mirror = NULL;
90076+ file = tmp->vm_file;
90077+ if (file) {
90078+ struct inode *inode = file_inode(file);
90079+ struct address_space *mapping = file->f_mapping;
90080+
90081+ get_file(file);
90082+ if (tmp->vm_flags & VM_DENYWRITE)
90083+ atomic_dec(&inode->i_writecount);
90084+ mutex_lock(&mapping->i_mmap_mutex);
90085+ if (tmp->vm_flags & VM_SHARED)
90086+ atomic_inc(&mapping->i_mmap_writable);
90087+ flush_dcache_mmap_lock(mapping);
90088+ /* insert tmp into the share list, just after mpnt */
90089+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90090+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
90091+ else
90092+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90093+ flush_dcache_mmap_unlock(mapping);
90094+ mutex_unlock(&mapping->i_mmap_mutex);
90095+ }
90096+
90097+ /*
90098+ * Clear hugetlb-related page reserves for children. This only
90099+ * affects MAP_PRIVATE mappings. Faults generated by the child
90100+ * are not guaranteed to succeed, even if read-only
90101+ */
90102+ if (is_vm_hugetlb_page(tmp))
90103+ reset_vma_resv_huge_pages(tmp);
90104+
90105+ return tmp;
90106+
90107+fail_nomem_anon_vma_fork:
90108+ mpol_put(vma_policy(tmp));
90109+fail_nomem_policy:
90110+ kmem_cache_free(vm_area_cachep, tmp);
90111+fail_nomem:
90112+ vm_unacct_memory(charge);
90113+ return NULL;
90114+}
90115+
90116+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90117 {
90118 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90119 struct rb_node **rb_link, *rb_parent;
90120 int retval;
90121- unsigned long charge;
90122
90123 uprobe_start_dup_mmap();
90124 down_write(&oldmm->mmap_sem);
90125@@ -391,55 +509,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90126
90127 prev = NULL;
90128 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90129- struct file *file;
90130-
90131 if (mpnt->vm_flags & VM_DONTCOPY) {
90132 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90133 -vma_pages(mpnt));
90134 continue;
90135 }
90136- charge = 0;
90137- if (mpnt->vm_flags & VM_ACCOUNT) {
90138- unsigned long len = vma_pages(mpnt);
90139-
90140- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90141- goto fail_nomem;
90142- charge = len;
90143- }
90144- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90145- if (!tmp)
90146- goto fail_nomem;
90147- *tmp = *mpnt;
90148- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90149- retval = vma_dup_policy(mpnt, tmp);
90150- if (retval)
90151- goto fail_nomem_policy;
90152- tmp->vm_mm = mm;
90153- if (anon_vma_fork(tmp, mpnt))
90154- goto fail_nomem_anon_vma_fork;
90155- tmp->vm_flags &= ~VM_LOCKED;
90156- tmp->vm_next = tmp->vm_prev = NULL;
90157- file = tmp->vm_file;
90158- if (file) {
90159- struct inode *inode = file_inode(file);
90160- struct address_space *mapping = file->f_mapping;
90161-
90162- get_file(file);
90163- if (tmp->vm_flags & VM_DENYWRITE)
90164- atomic_dec(&inode->i_writecount);
90165- mutex_lock(&mapping->i_mmap_mutex);
90166- if (tmp->vm_flags & VM_SHARED)
90167- atomic_inc(&mapping->i_mmap_writable);
90168- flush_dcache_mmap_lock(mapping);
90169- /* insert tmp into the share list, just after mpnt */
90170- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90171- vma_nonlinear_insert(tmp,
90172- &mapping->i_mmap_nonlinear);
90173- else
90174- vma_interval_tree_insert_after(tmp, mpnt,
90175- &mapping->i_mmap);
90176- flush_dcache_mmap_unlock(mapping);
90177- mutex_unlock(&mapping->i_mmap_mutex);
90178+ tmp = dup_vma(mm, oldmm, mpnt);
90179+ if (!tmp) {
90180+ retval = -ENOMEM;
90181+ goto out;
90182 }
90183
90184 /*
90185@@ -471,6 +549,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90186 if (retval)
90187 goto out;
90188 }
90189+
90190+#ifdef CONFIG_PAX_SEGMEXEC
90191+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90192+ struct vm_area_struct *mpnt_m;
90193+
90194+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90195+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90196+
90197+ if (!mpnt->vm_mirror)
90198+ continue;
90199+
90200+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90201+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90202+ mpnt->vm_mirror = mpnt_m;
90203+ } else {
90204+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90205+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90206+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90207+ mpnt->vm_mirror->vm_mirror = mpnt;
90208+ }
90209+ }
90210+ BUG_ON(mpnt_m);
90211+ }
90212+#endif
90213+
90214 /* a new mm has just been created */
90215 arch_dup_mmap(oldmm, mm);
90216 retval = 0;
90217@@ -480,14 +583,6 @@ out:
90218 up_write(&oldmm->mmap_sem);
90219 uprobe_end_dup_mmap();
90220 return retval;
90221-fail_nomem_anon_vma_fork:
90222- mpol_put(vma_policy(tmp));
90223-fail_nomem_policy:
90224- kmem_cache_free(vm_area_cachep, tmp);
90225-fail_nomem:
90226- retval = -ENOMEM;
90227- vm_unacct_memory(charge);
90228- goto out;
90229 }
90230
90231 static inline int mm_alloc_pgd(struct mm_struct *mm)
90232@@ -729,8 +824,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90233 return ERR_PTR(err);
90234
90235 mm = get_task_mm(task);
90236- if (mm && mm != current->mm &&
90237- !ptrace_may_access(task, mode)) {
90238+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
90239+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
90240 mmput(mm);
90241 mm = ERR_PTR(-EACCES);
90242 }
90243@@ -933,13 +1028,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90244 spin_unlock(&fs->lock);
90245 return -EAGAIN;
90246 }
90247- fs->users++;
90248+ atomic_inc(&fs->users);
90249 spin_unlock(&fs->lock);
90250 return 0;
90251 }
90252 tsk->fs = copy_fs_struct(fs);
90253 if (!tsk->fs)
90254 return -ENOMEM;
90255+ /* Carry through gr_chroot_dentry and is_chrooted instead
90256+ of recomputing it here. Already copied when the task struct
90257+ is duplicated. This allows pivot_root to not be treated as
90258+ a chroot
90259+ */
90260+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
90261+
90262 return 0;
90263 }
90264
90265@@ -1173,7 +1275,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
90266 * parts of the process environment (as per the clone
90267 * flags). The actual kick-off is left to the caller.
90268 */
90269-static struct task_struct *copy_process(unsigned long clone_flags,
90270+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
90271 unsigned long stack_start,
90272 unsigned long stack_size,
90273 int __user *child_tidptr,
90274@@ -1244,6 +1346,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90275 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
90276 #endif
90277 retval = -EAGAIN;
90278+
90279+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
90280+
90281 if (atomic_read(&p->real_cred->user->processes) >=
90282 task_rlimit(p, RLIMIT_NPROC)) {
90283 if (p->real_cred->user != INIT_USER &&
90284@@ -1493,6 +1598,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90285 goto bad_fork_free_pid;
90286 }
90287
90288+ /* synchronizes with gr_set_acls()
90289+ we need to call this past the point of no return for fork()
90290+ */
90291+ gr_copy_label(p);
90292+
90293 if (likely(p->pid)) {
90294 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
90295
90296@@ -1583,6 +1693,8 @@ bad_fork_cleanup_count:
90297 bad_fork_free:
90298 free_task(p);
90299 fork_out:
90300+ gr_log_forkfail(retval);
90301+
90302 return ERR_PTR(retval);
90303 }
90304
90305@@ -1644,6 +1756,7 @@ long do_fork(unsigned long clone_flags,
90306
90307 p = copy_process(clone_flags, stack_start, stack_size,
90308 child_tidptr, NULL, trace);
90309+ add_latent_entropy();
90310 /*
90311 * Do this prior waking up the new thread - the thread pointer
90312 * might get invalid after that point, if the thread exits quickly.
90313@@ -1660,6 +1773,8 @@ long do_fork(unsigned long clone_flags,
90314 if (clone_flags & CLONE_PARENT_SETTID)
90315 put_user(nr, parent_tidptr);
90316
90317+ gr_handle_brute_check();
90318+
90319 if (clone_flags & CLONE_VFORK) {
90320 p->vfork_done = &vfork;
90321 init_completion(&vfork);
90322@@ -1778,7 +1893,7 @@ void __init proc_caches_init(void)
90323 mm_cachep = kmem_cache_create("mm_struct",
90324 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
90325 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
90326- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
90327+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
90328 mmap_init();
90329 nsproxy_cache_init();
90330 }
90331@@ -1818,7 +1933,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
90332 return 0;
90333
90334 /* don't need lock here; in the worst case we'll do useless copy */
90335- if (fs->users == 1)
90336+ if (atomic_read(&fs->users) == 1)
90337 return 0;
90338
90339 *new_fsp = copy_fs_struct(fs);
90340@@ -1930,7 +2045,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
90341 fs = current->fs;
90342 spin_lock(&fs->lock);
90343 current->fs = new_fs;
90344- if (--fs->users)
90345+ gr_set_chroot_entries(current, &current->fs->root);
90346+ if (atomic_dec_return(&fs->users))
90347 new_fs = NULL;
90348 else
90349 new_fs = fs;
90350diff --git a/kernel/futex.c b/kernel/futex.c
90351index f3a3a07..6820bc0 100644
90352--- a/kernel/futex.c
90353+++ b/kernel/futex.c
90354@@ -202,7 +202,7 @@ struct futex_pi_state {
90355 atomic_t refcount;
90356
90357 union futex_key key;
90358-};
90359+} __randomize_layout;
90360
90361 /**
90362 * struct futex_q - The hashed futex queue entry, one per waiting task
90363@@ -236,7 +236,7 @@ struct futex_q {
90364 struct rt_mutex_waiter *rt_waiter;
90365 union futex_key *requeue_pi_key;
90366 u32 bitset;
90367-};
90368+} __randomize_layout;
90369
90370 static const struct futex_q futex_q_init = {
90371 /* list gets initialized in queue_me()*/
90372@@ -396,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
90373 struct page *page, *page_head;
90374 int err, ro = 0;
90375
90376+#ifdef CONFIG_PAX_SEGMEXEC
90377+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
90378+ return -EFAULT;
90379+#endif
90380+
90381 /*
90382 * The futex address must be "naturally" aligned.
90383 */
90384@@ -595,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
90385
90386 static int get_futex_value_locked(u32 *dest, u32 __user *from)
90387 {
90388- int ret;
90389+ unsigned long ret;
90390
90391 pagefault_disable();
90392 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
90393@@ -641,8 +646,14 @@ static struct futex_pi_state * alloc_pi_state(void)
90394 return pi_state;
90395 }
90396
90397+/*
90398+ * Must be called with the hb lock held.
90399+ */
90400 static void free_pi_state(struct futex_pi_state *pi_state)
90401 {
90402+ if (!pi_state)
90403+ return;
90404+
90405 if (!atomic_dec_and_test(&pi_state->refcount))
90406 return;
90407
90408@@ -1521,15 +1532,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
90409 }
90410
90411 retry:
90412- if (pi_state != NULL) {
90413- /*
90414- * We will have to lookup the pi_state again, so free this one
90415- * to keep the accounting correct.
90416- */
90417- free_pi_state(pi_state);
90418- pi_state = NULL;
90419- }
90420-
90421 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
90422 if (unlikely(ret != 0))
90423 goto out;
90424@@ -1619,6 +1621,8 @@ retry_private:
90425 case 0:
90426 break;
90427 case -EFAULT:
90428+ free_pi_state(pi_state);
90429+ pi_state = NULL;
90430 double_unlock_hb(hb1, hb2);
90431 hb_waiters_dec(hb2);
90432 put_futex_key(&key2);
90433@@ -1634,6 +1638,8 @@ retry_private:
90434 * exit to complete.
90435 * - The user space value changed.
90436 */
90437+ free_pi_state(pi_state);
90438+ pi_state = NULL;
90439 double_unlock_hb(hb1, hb2);
90440 hb_waiters_dec(hb2);
90441 put_futex_key(&key2);
90442@@ -1710,6 +1716,7 @@ retry_private:
90443 }
90444
90445 out_unlock:
90446+ free_pi_state(pi_state);
90447 double_unlock_hb(hb1, hb2);
90448 hb_waiters_dec(hb2);
90449
90450@@ -1727,8 +1734,6 @@ out_put_keys:
90451 out_put_key1:
90452 put_futex_key(&key1);
90453 out:
90454- if (pi_state != NULL)
90455- free_pi_state(pi_state);
90456 return ret ? ret : task_count;
90457 }
90458
90459@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
90460 {
90461 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
90462 u32 curval;
90463+ mm_segment_t oldfs;
90464
90465 /*
90466 * This will fail and we want it. Some arch implementations do
90467@@ -3011,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
90468 * implementation, the non-functional ones will return
90469 * -ENOSYS.
90470 */
90471+ oldfs = get_fs();
90472+ set_fs(USER_DS);
90473 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
90474 futex_cmpxchg_enabled = 1;
90475+ set_fs(oldfs);
90476 #endif
90477 }
90478
90479diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90480index 55c8c93..9ba7ad6 100644
90481--- a/kernel/futex_compat.c
90482+++ b/kernel/futex_compat.c
90483@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
90484 return 0;
90485 }
90486
90487-static void __user *futex_uaddr(struct robust_list __user *entry,
90488+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
90489 compat_long_t futex_offset)
90490 {
90491 compat_uptr_t base = ptr_to_compat(entry);
90492diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90493index b358a80..fc25240 100644
90494--- a/kernel/gcov/base.c
90495+++ b/kernel/gcov/base.c
90496@@ -114,11 +114,6 @@ void gcov_enable_events(void)
90497 }
90498
90499 #ifdef CONFIG_MODULES
90500-static inline int within(void *addr, void *start, unsigned long size)
90501-{
90502- return ((addr >= start) && (addr < start + size));
90503-}
90504-
90505 /* Update list and generate events when modules are unloaded. */
90506 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90507 void *data)
90508@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90509
90510 /* Remove entries located in module from linked list. */
90511 while ((info = gcov_info_next(info))) {
90512- if (within(info, mod->module_core, mod->core_size)) {
90513+ if (within_module_core_rw((unsigned long)info, mod)) {
90514 gcov_info_unlink(prev, info);
90515 if (gcov_events_enabled)
90516 gcov_event(GCOV_REMOVE, info);
90517diff --git a/kernel/jump_label.c b/kernel/jump_label.c
90518index 9019f15..9a3c42e 100644
90519--- a/kernel/jump_label.c
90520+++ b/kernel/jump_label.c
90521@@ -14,6 +14,7 @@
90522 #include <linux/err.h>
90523 #include <linux/static_key.h>
90524 #include <linux/jump_label_ratelimit.h>
90525+#include <linux/mm.h>
90526
90527 #ifdef HAVE_JUMP_LABEL
90528
90529@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
90530
90531 size = (((unsigned long)stop - (unsigned long)start)
90532 / sizeof(struct jump_entry));
90533+ pax_open_kernel();
90534 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
90535+ pax_close_kernel();
90536 }
90537
90538 static void jump_label_update(struct static_key *key, int enable);
90539@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
90540 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
90541 struct jump_entry *iter;
90542
90543+ pax_open_kernel();
90544 for (iter = iter_start; iter < iter_stop; iter++) {
90545 if (within_module_init(iter->code, mod))
90546 iter->code = 0;
90547 }
90548+ pax_close_kernel();
90549 }
90550
90551 static int
90552diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
90553index ae51670..c1a9796 100644
90554--- a/kernel/kallsyms.c
90555+++ b/kernel/kallsyms.c
90556@@ -11,6 +11,9 @@
90557 * Changed the compression method from stem compression to "table lookup"
90558 * compression (see scripts/kallsyms.c for a more complete description)
90559 */
90560+#ifdef CONFIG_GRKERNSEC_HIDESYM
90561+#define __INCLUDED_BY_HIDESYM 1
90562+#endif
90563 #include <linux/kallsyms.h>
90564 #include <linux/module.h>
90565 #include <linux/init.h>
90566@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
90567
90568 static inline int is_kernel_inittext(unsigned long addr)
90569 {
90570+ if (system_state != SYSTEM_BOOTING)
90571+ return 0;
90572+
90573 if (addr >= (unsigned long)_sinittext
90574 && addr <= (unsigned long)_einittext)
90575 return 1;
90576 return 0;
90577 }
90578
90579+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90580+#ifdef CONFIG_MODULES
90581+static inline int is_module_text(unsigned long addr)
90582+{
90583+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
90584+ return 1;
90585+
90586+ addr = ktla_ktva(addr);
90587+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
90588+}
90589+#else
90590+static inline int is_module_text(unsigned long addr)
90591+{
90592+ return 0;
90593+}
90594+#endif
90595+#endif
90596+
90597 static inline int is_kernel_text(unsigned long addr)
90598 {
90599 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
90600@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
90601
90602 static inline int is_kernel(unsigned long addr)
90603 {
90604+
90605+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90606+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
90607+ return 1;
90608+
90609+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
90610+#else
90611 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
90612+#endif
90613+
90614 return 1;
90615 return in_gate_area_no_mm(addr);
90616 }
90617
90618 static int is_ksym_addr(unsigned long addr)
90619 {
90620+
90621+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90622+ if (is_module_text(addr))
90623+ return 0;
90624+#endif
90625+
90626 if (all_var)
90627 return is_kernel(addr);
90628
90629@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
90630
90631 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
90632 {
90633- iter->name[0] = '\0';
90634 iter->nameoff = get_symbol_offset(new_pos);
90635 iter->pos = new_pos;
90636 }
90637@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
90638 {
90639 struct kallsym_iter *iter = m->private;
90640
90641+#ifdef CONFIG_GRKERNSEC_HIDESYM
90642+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
90643+ return 0;
90644+#endif
90645+
90646 /* Some debugging symbols have no name. Ignore them. */
90647 if (!iter->name[0])
90648 return 0;
90649@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
90650 */
90651 type = iter->exported ? toupper(iter->type) :
90652 tolower(iter->type);
90653+
90654 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
90655 type, iter->name, iter->module_name);
90656 } else
90657@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
90658 struct kallsym_iter *iter;
90659 int ret;
90660
90661- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
90662+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
90663 if (!iter)
90664 return -ENOMEM;
90665 reset_iter(iter, 0);
90666diff --git a/kernel/kcmp.c b/kernel/kcmp.c
90667index 0aa69ea..a7fcafb 100644
90668--- a/kernel/kcmp.c
90669+++ b/kernel/kcmp.c
90670@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
90671 struct task_struct *task1, *task2;
90672 int ret;
90673
90674+#ifdef CONFIG_GRKERNSEC
90675+ return -ENOSYS;
90676+#endif
90677+
90678 rcu_read_lock();
90679
90680 /*
90681diff --git a/kernel/kexec.c b/kernel/kexec.c
90682index 2bee072..8979af8 100644
90683--- a/kernel/kexec.c
90684+++ b/kernel/kexec.c
90685@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
90686 compat_ulong_t, flags)
90687 {
90688 struct compat_kexec_segment in;
90689- struct kexec_segment out, __user *ksegments;
90690+ struct kexec_segment out;
90691+ struct kexec_segment __user *ksegments;
90692 unsigned long i, result;
90693
90694 /* Don't allow clients that don't understand the native
90695diff --git a/kernel/kmod.c b/kernel/kmod.c
90696index 8637e04..8b1d0d8 100644
90697--- a/kernel/kmod.c
90698+++ b/kernel/kmod.c
90699@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
90700 kfree(info->argv);
90701 }
90702
90703-static int call_modprobe(char *module_name, int wait)
90704+static int call_modprobe(char *module_name, char *module_param, int wait)
90705 {
90706 struct subprocess_info *info;
90707 static char *envp[] = {
90708@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
90709 NULL
90710 };
90711
90712- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
90713+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
90714 if (!argv)
90715 goto out;
90716
90717@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
90718 argv[1] = "-q";
90719 argv[2] = "--";
90720 argv[3] = module_name; /* check free_modprobe_argv() */
90721- argv[4] = NULL;
90722+ argv[4] = module_param;
90723+ argv[5] = NULL;
90724
90725 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
90726 NULL, free_modprobe_argv, NULL);
90727@@ -129,9 +130,8 @@ out:
90728 * If module auto-loading support is disabled then this function
90729 * becomes a no-operation.
90730 */
90731-int __request_module(bool wait, const char *fmt, ...)
90732+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
90733 {
90734- va_list args;
90735 char module_name[MODULE_NAME_LEN];
90736 unsigned int max_modprobes;
90737 int ret;
90738@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
90739 if (!modprobe_path[0])
90740 return 0;
90741
90742- va_start(args, fmt);
90743- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
90744- va_end(args);
90745+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
90746 if (ret >= MODULE_NAME_LEN)
90747 return -ENAMETOOLONG;
90748
90749@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
90750 if (ret)
90751 return ret;
90752
90753+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90754+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90755+ /* hack to workaround consolekit/udisks stupidity */
90756+ read_lock(&tasklist_lock);
90757+ if (!strcmp(current->comm, "mount") &&
90758+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
90759+ read_unlock(&tasklist_lock);
90760+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
90761+ return -EPERM;
90762+ }
90763+ read_unlock(&tasklist_lock);
90764+ }
90765+#endif
90766+
90767 /* If modprobe needs a service that is in a module, we get a recursive
90768 * loop. Limit the number of running kmod threads to max_threads/2 or
90769 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
90770@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
90771
90772 trace_module_request(module_name, wait, _RET_IP_);
90773
90774- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90775+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90776
90777 atomic_dec(&kmod_concurrent);
90778 return ret;
90779 }
90780+
90781+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
90782+{
90783+ va_list args;
90784+ int ret;
90785+
90786+ va_start(args, fmt);
90787+ ret = ____request_module(wait, module_param, fmt, args);
90788+ va_end(args);
90789+
90790+ return ret;
90791+}
90792+
90793+int __request_module(bool wait, const char *fmt, ...)
90794+{
90795+ va_list args;
90796+ int ret;
90797+
90798+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90799+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90800+ char module_param[MODULE_NAME_LEN];
90801+
90802+ memset(module_param, 0, sizeof(module_param));
90803+
90804+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
90805+
90806+ va_start(args, fmt);
90807+ ret = ____request_module(wait, module_param, fmt, args);
90808+ va_end(args);
90809+
90810+ return ret;
90811+ }
90812+#endif
90813+
90814+ va_start(args, fmt);
90815+ ret = ____request_module(wait, NULL, fmt, args);
90816+ va_end(args);
90817+
90818+ return ret;
90819+}
90820+
90821 EXPORT_SYMBOL(__request_module);
90822 #endif /* CONFIG_MODULES */
90823
90824@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
90825 */
90826 set_user_nice(current, 0);
90827
90828+#ifdef CONFIG_GRKERNSEC
90829+ /* this is race-free as far as userland is concerned as we copied
90830+ out the path to be used prior to this point and are now operating
90831+ on that copy
90832+ */
90833+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90834+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90835+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90836+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
90837+ retval = -EPERM;
90838+ goto fail;
90839+ }
90840+#endif
90841+
90842 retval = -ENOMEM;
90843 new = prepare_kernel_cred(current);
90844 if (!new)
90845@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
90846 commit_creds(new);
90847
90848 retval = do_execve(getname_kernel(sub_info->path),
90849- (const char __user *const __user *)sub_info->argv,
90850- (const char __user *const __user *)sub_info->envp);
90851+ (const char __user *const __force_user *)sub_info->argv,
90852+ (const char __user *const __force_user *)sub_info->envp);
90853 if (!retval)
90854 return 0;
90855
90856@@ -260,6 +327,10 @@ static int call_helper(void *data)
90857
90858 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90859 {
90860+#ifdef CONFIG_GRKERNSEC
90861+ kfree(info->path);
90862+ info->path = info->origpath;
90863+#endif
90864 if (info->cleanup)
90865 (*info->cleanup)(info);
90866 kfree(info);
90867@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
90868 *
90869 * Thus the __user pointer cast is valid here.
90870 */
90871- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90872+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90873
90874 /*
90875 * If ret is 0, either ____call_usermodehelper failed and the
90876@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90877 goto out;
90878
90879 INIT_WORK(&sub_info->work, __call_usermodehelper);
90880+#ifdef CONFIG_GRKERNSEC
90881+ sub_info->origpath = path;
90882+ sub_info->path = kstrdup(path, gfp_mask);
90883+#else
90884 sub_info->path = path;
90885+#endif
90886 sub_info->argv = argv;
90887 sub_info->envp = envp;
90888
90889@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90890 static int proc_cap_handler(struct ctl_table *table, int write,
90891 void __user *buffer, size_t *lenp, loff_t *ppos)
90892 {
90893- struct ctl_table t;
90894+ ctl_table_no_const t;
90895 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90896 kernel_cap_t new_cap;
90897 int err, i;
90898diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90899index 3995f54..e247879 100644
90900--- a/kernel/kprobes.c
90901+++ b/kernel/kprobes.c
90902@@ -31,6 +31,9 @@
90903 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90904 * <prasanna@in.ibm.com> added function-return probes.
90905 */
90906+#ifdef CONFIG_GRKERNSEC_HIDESYM
90907+#define __INCLUDED_BY_HIDESYM 1
90908+#endif
90909 #include <linux/kprobes.h>
90910 #include <linux/hash.h>
90911 #include <linux/init.h>
90912@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90913
90914 static void *alloc_insn_page(void)
90915 {
90916- return module_alloc(PAGE_SIZE);
90917+ return module_alloc_exec(PAGE_SIZE);
90918 }
90919
90920 static void free_insn_page(void *page)
90921 {
90922- module_free(NULL, page);
90923+ module_free_exec(NULL, page);
90924 }
90925
90926 struct kprobe_insn_cache kprobe_insn_slots = {
90927@@ -2187,11 +2190,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90928 kprobe_type = "k";
90929
90930 if (sym)
90931- seq_printf(pi, "%p %s %s+0x%x %s ",
90932+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90933 p->addr, kprobe_type, sym, offset,
90934 (modname ? modname : " "));
90935 else
90936- seq_printf(pi, "%p %s %p ",
90937+ seq_printf(pi, "%pK %s %pK ",
90938 p->addr, kprobe_type, p->addr);
90939
90940 if (!pp)
90941diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90942index 6683cce..daf8999 100644
90943--- a/kernel/ksysfs.c
90944+++ b/kernel/ksysfs.c
90945@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90946 {
90947 if (count+1 > UEVENT_HELPER_PATH_LEN)
90948 return -ENOENT;
90949+ if (!capable(CAP_SYS_ADMIN))
90950+ return -EPERM;
90951 memcpy(uevent_helper, buf, count);
90952 uevent_helper[count] = '\0';
90953 if (count && uevent_helper[count-1] == '\n')
90954@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90955 return count;
90956 }
90957
90958-static struct bin_attribute notes_attr = {
90959+static bin_attribute_no_const notes_attr __read_only = {
90960 .attr = {
90961 .name = "notes",
90962 .mode = S_IRUGO,
90963diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90964index 88d0d44..e9ce0ee 100644
90965--- a/kernel/locking/lockdep.c
90966+++ b/kernel/locking/lockdep.c
90967@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90968 end = (unsigned long) &_end,
90969 addr = (unsigned long) obj;
90970
90971+#ifdef CONFIG_PAX_KERNEXEC
90972+ start = ktla_ktva(start);
90973+#endif
90974+
90975 /*
90976 * static variable?
90977 */
90978@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90979 if (!static_obj(lock->key)) {
90980 debug_locks_off();
90981 printk("INFO: trying to register non-static key.\n");
90982+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90983 printk("the code is fine but needs lockdep annotation.\n");
90984 printk("turning off the locking correctness validator.\n");
90985 dump_stack();
90986@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90987 if (!class)
90988 return 0;
90989 }
90990- atomic_inc((atomic_t *)&class->ops);
90991+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90992 if (very_verbose(class)) {
90993 printk("\nacquire class [%p] %s", class->key, class->name);
90994 if (class->name_version > 1)
90995diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90996index ef43ac4..2720dfa 100644
90997--- a/kernel/locking/lockdep_proc.c
90998+++ b/kernel/locking/lockdep_proc.c
90999@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91000 return 0;
91001 }
91002
91003- seq_printf(m, "%p", class->key);
91004+ seq_printf(m, "%pK", class->key);
91005 #ifdef CONFIG_DEBUG_LOCKDEP
91006 seq_printf(m, " OPS:%8ld", class->ops);
91007 #endif
91008@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91009
91010 list_for_each_entry(entry, &class->locks_after, entry) {
91011 if (entry->distance == 1) {
91012- seq_printf(m, " -> [%p] ", entry->class->key);
91013+ seq_printf(m, " -> [%pK] ", entry->class->key);
91014 print_name(m, entry->class);
91015 seq_puts(m, "\n");
91016 }
91017@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91018 if (!class->key)
91019 continue;
91020
91021- seq_printf(m, "[%p] ", class->key);
91022+ seq_printf(m, "[%pK] ", class->key);
91023 print_name(m, class);
91024 seq_puts(m, "\n");
91025 }
91026@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91027 if (!i)
91028 seq_line(m, '-', 40-namelen, namelen);
91029
91030- snprintf(ip, sizeof(ip), "[<%p>]",
91031+ snprintf(ip, sizeof(ip), "[<%pK>]",
91032 (void *)class->contention_point[i]);
91033 seq_printf(m, "%40s %14lu %29s %pS\n",
91034 name, stats->contention_point[i],
91035@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91036 if (!i)
91037 seq_line(m, '-', 40-namelen, namelen);
91038
91039- snprintf(ip, sizeof(ip), "[<%p>]",
91040+ snprintf(ip, sizeof(ip), "[<%pK>]",
91041 (void *)class->contending_point[i]);
91042 seq_printf(m, "%40s %14lu %29s %pS\n",
91043 name, stats->contending_point[i],
91044diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
91045index 9887a90..0cd2b1d 100644
91046--- a/kernel/locking/mcs_spinlock.c
91047+++ b/kernel/locking/mcs_spinlock.c
91048@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91049
91050 prev = decode_cpu(old);
91051 node->prev = prev;
91052- ACCESS_ONCE(prev->next) = node;
91053+ ACCESS_ONCE_RW(prev->next) = node;
91054
91055 /*
91056 * Normally @prev is untouchable after the above store; because at that
91057@@ -172,8 +172,8 @@ unqueue:
91058 * it will wait in Step-A.
91059 */
91060
91061- ACCESS_ONCE(next->prev) = prev;
91062- ACCESS_ONCE(prev->next) = next;
91063+ ACCESS_ONCE_RW(next->prev) = prev;
91064+ ACCESS_ONCE_RW(prev->next) = next;
91065
91066 return false;
91067 }
91068@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91069 node = this_cpu_ptr(&osq_node);
91070 next = xchg(&node->next, NULL);
91071 if (next) {
91072- ACCESS_ONCE(next->locked) = 1;
91073+ ACCESS_ONCE_RW(next->locked) = 1;
91074 return;
91075 }
91076
91077 next = osq_wait_next(lock, node, NULL);
91078 if (next)
91079- ACCESS_ONCE(next->locked) = 1;
91080+ ACCESS_ONCE_RW(next->locked) = 1;
91081 }
91082
91083 #endif
91084diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91085index 23e89c5..8558eac 100644
91086--- a/kernel/locking/mcs_spinlock.h
91087+++ b/kernel/locking/mcs_spinlock.h
91088@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91089 */
91090 return;
91091 }
91092- ACCESS_ONCE(prev->next) = node;
91093+ ACCESS_ONCE_RW(prev->next) = node;
91094
91095 /* Wait until the lock holder passes the lock down. */
91096 arch_mcs_spin_lock_contended(&node->locked);
91097diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91098index 5cf6731..ce3bc5a 100644
91099--- a/kernel/locking/mutex-debug.c
91100+++ b/kernel/locking/mutex-debug.c
91101@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91102 }
91103
91104 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91105- struct thread_info *ti)
91106+ struct task_struct *task)
91107 {
91108 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91109
91110 /* Mark the current thread as blocked on the lock: */
91111- ti->task->blocked_on = waiter;
91112+ task->blocked_on = waiter;
91113 }
91114
91115 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91116- struct thread_info *ti)
91117+ struct task_struct *task)
91118 {
91119 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91120- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91121- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91122- ti->task->blocked_on = NULL;
91123+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91124+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91125+ task->blocked_on = NULL;
91126
91127 list_del_init(&waiter->list);
91128 waiter->task = NULL;
91129diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91130index 0799fd3..d06ae3b 100644
91131--- a/kernel/locking/mutex-debug.h
91132+++ b/kernel/locking/mutex-debug.h
91133@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91134 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91135 extern void debug_mutex_add_waiter(struct mutex *lock,
91136 struct mutex_waiter *waiter,
91137- struct thread_info *ti);
91138+ struct task_struct *task);
91139 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91140- struct thread_info *ti);
91141+ struct task_struct *task);
91142 extern void debug_mutex_unlock(struct mutex *lock);
91143 extern void debug_mutex_init(struct mutex *lock, const char *name,
91144 struct lock_class_key *key);
91145diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91146index ae712b2..d0d4a41 100644
91147--- a/kernel/locking/mutex.c
91148+++ b/kernel/locking/mutex.c
91149@@ -486,7 +486,7 @@ slowpath:
91150 goto skip_wait;
91151
91152 debug_mutex_lock_common(lock, &waiter);
91153- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91154+ debug_mutex_add_waiter(lock, &waiter, task);
91155
91156 /* add waiting tasks to the end of the waitqueue (FIFO): */
91157 list_add_tail(&waiter.list, &lock->wait_list);
91158@@ -531,7 +531,7 @@ slowpath:
91159 schedule_preempt_disabled();
91160 spin_lock_mutex(&lock->wait_lock, flags);
91161 }
91162- mutex_remove_waiter(lock, &waiter, current_thread_info());
91163+ mutex_remove_waiter(lock, &waiter, task);
91164 /* set it to 0 if there are no waiters left: */
91165 if (likely(list_empty(&lock->wait_list)))
91166 atomic_set(&lock->count, 0);
91167@@ -568,7 +568,7 @@ skip_wait:
91168 return 0;
91169
91170 err:
91171- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91172+ mutex_remove_waiter(lock, &waiter, task);
91173 spin_unlock_mutex(&lock->wait_lock, flags);
91174 debug_mutex_free_waiter(&waiter);
91175 mutex_release(&lock->dep_map, 1, ip);
91176diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91177index 1d96dd0..994ff19 100644
91178--- a/kernel/locking/rtmutex-tester.c
91179+++ b/kernel/locking/rtmutex-tester.c
91180@@ -22,7 +22,7 @@
91181 #define MAX_RT_TEST_MUTEXES 8
91182
91183 static spinlock_t rttest_lock;
91184-static atomic_t rttest_event;
91185+static atomic_unchecked_t rttest_event;
91186
91187 struct test_thread_data {
91188 int opcode;
91189@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91190
91191 case RTTEST_LOCKCONT:
91192 td->mutexes[td->opdata] = 1;
91193- td->event = atomic_add_return(1, &rttest_event);
91194+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91195 return 0;
91196
91197 case RTTEST_RESET:
91198@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91199 return 0;
91200
91201 case RTTEST_RESETEVENT:
91202- atomic_set(&rttest_event, 0);
91203+ atomic_set_unchecked(&rttest_event, 0);
91204 return 0;
91205
91206 default:
91207@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91208 return ret;
91209
91210 td->mutexes[id] = 1;
91211- td->event = atomic_add_return(1, &rttest_event);
91212+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91213 rt_mutex_lock(&mutexes[id]);
91214- td->event = atomic_add_return(1, &rttest_event);
91215+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91216 td->mutexes[id] = 4;
91217 return 0;
91218
91219@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91220 return ret;
91221
91222 td->mutexes[id] = 1;
91223- td->event = atomic_add_return(1, &rttest_event);
91224+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91225 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91226- td->event = atomic_add_return(1, &rttest_event);
91227+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91228 td->mutexes[id] = ret ? 0 : 4;
91229 return ret ? -EINTR : 0;
91230
91231@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91232 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91233 return ret;
91234
91235- td->event = atomic_add_return(1, &rttest_event);
91236+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91237 rt_mutex_unlock(&mutexes[id]);
91238- td->event = atomic_add_return(1, &rttest_event);
91239+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91240 td->mutexes[id] = 0;
91241 return 0;
91242
91243@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91244 break;
91245
91246 td->mutexes[dat] = 2;
91247- td->event = atomic_add_return(1, &rttest_event);
91248+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91249 break;
91250
91251 default:
91252@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91253 return;
91254
91255 td->mutexes[dat] = 3;
91256- td->event = atomic_add_return(1, &rttest_event);
91257+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91258 break;
91259
91260 case RTTEST_LOCKNOWAIT:
91261@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91262 return;
91263
91264 td->mutexes[dat] = 1;
91265- td->event = atomic_add_return(1, &rttest_event);
91266+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91267 return;
91268
91269 default:
91270diff --git a/kernel/module.c b/kernel/module.c
91271index 03214bd2..6242887 100644
91272--- a/kernel/module.c
91273+++ b/kernel/module.c
91274@@ -60,6 +60,7 @@
91275 #include <linux/jump_label.h>
91276 #include <linux/pfn.h>
91277 #include <linux/bsearch.h>
91278+#include <linux/grsecurity.h>
91279 #include <uapi/linux/module.h>
91280 #include "module-internal.h"
91281
91282@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91283
91284 /* Bounds of module allocation, for speeding __module_address.
91285 * Protected by module_mutex. */
91286-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91287+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91288+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91289
91290 int register_module_notifier(struct notifier_block * nb)
91291 {
91292@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91293 return true;
91294
91295 list_for_each_entry_rcu(mod, &modules, list) {
91296- struct symsearch arr[] = {
91297+ struct symsearch modarr[] = {
91298 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91299 NOT_GPL_ONLY, false },
91300 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91301@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91302 if (mod->state == MODULE_STATE_UNFORMED)
91303 continue;
91304
91305- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91306+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91307 return true;
91308 }
91309 return false;
91310@@ -488,7 +490,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
91311 if (!pcpusec->sh_size)
91312 return 0;
91313
91314- if (align > PAGE_SIZE) {
91315+ if (align-1 >= PAGE_SIZE) {
91316 pr_warn("%s: per-cpu alignment %li > %li\n",
91317 mod->name, align, PAGE_SIZE);
91318 align = PAGE_SIZE;
91319@@ -1060,7 +1062,7 @@ struct module_attribute module_uevent =
91320 static ssize_t show_coresize(struct module_attribute *mattr,
91321 struct module_kobject *mk, char *buffer)
91322 {
91323- return sprintf(buffer, "%u\n", mk->mod->core_size);
91324+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
91325 }
91326
91327 static struct module_attribute modinfo_coresize =
91328@@ -1069,7 +1071,7 @@ static struct module_attribute modinfo_coresize =
91329 static ssize_t show_initsize(struct module_attribute *mattr,
91330 struct module_kobject *mk, char *buffer)
91331 {
91332- return sprintf(buffer, "%u\n", mk->mod->init_size);
91333+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
91334 }
91335
91336 static struct module_attribute modinfo_initsize =
91337@@ -1161,12 +1163,29 @@ static int check_version(Elf_Shdr *sechdrs,
91338 goto bad_version;
91339 }
91340
91341+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91342+ /*
91343+ * avoid potentially printing jibberish on attempted load
91344+ * of a module randomized with a different seed
91345+ */
91346+ pr_warn("no symbol version for %s\n", symname);
91347+#else
91348 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
91349+#endif
91350 return 0;
91351
91352 bad_version:
91353+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91354+ /*
91355+ * avoid potentially printing jibberish on attempted load
91356+ * of a module randomized with a different seed
91357+ */
91358+ printk("attempted module disagrees about version of symbol %s\n",
91359+ symname);
91360+#else
91361 printk("%s: disagrees about version of symbol %s\n",
91362 mod->name, symname);
91363+#endif
91364 return 0;
91365 }
91366
91367@@ -1282,7 +1301,7 @@ resolve_symbol_wait(struct module *mod,
91368 */
91369 #ifdef CONFIG_SYSFS
91370
91371-#ifdef CONFIG_KALLSYMS
91372+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91373 static inline bool sect_empty(const Elf_Shdr *sect)
91374 {
91375 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
91376@@ -1422,7 +1441,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
91377 {
91378 unsigned int notes, loaded, i;
91379 struct module_notes_attrs *notes_attrs;
91380- struct bin_attribute *nattr;
91381+ bin_attribute_no_const *nattr;
91382
91383 /* failed to create section attributes, so can't create notes */
91384 if (!mod->sect_attrs)
91385@@ -1534,7 +1553,7 @@ static void del_usage_links(struct module *mod)
91386 static int module_add_modinfo_attrs(struct module *mod)
91387 {
91388 struct module_attribute *attr;
91389- struct module_attribute *temp_attr;
91390+ module_attribute_no_const *temp_attr;
91391 int error = 0;
91392 int i;
91393
91394@@ -1755,21 +1774,21 @@ static void set_section_ro_nx(void *base,
91395
91396 static void unset_module_core_ro_nx(struct module *mod)
91397 {
91398- set_page_attributes(mod->module_core + mod->core_text_size,
91399- mod->module_core + mod->core_size,
91400+ set_page_attributes(mod->module_core_rw,
91401+ mod->module_core_rw + mod->core_size_rw,
91402 set_memory_x);
91403- set_page_attributes(mod->module_core,
91404- mod->module_core + mod->core_ro_size,
91405+ set_page_attributes(mod->module_core_rx,
91406+ mod->module_core_rx + mod->core_size_rx,
91407 set_memory_rw);
91408 }
91409
91410 static void unset_module_init_ro_nx(struct module *mod)
91411 {
91412- set_page_attributes(mod->module_init + mod->init_text_size,
91413- mod->module_init + mod->init_size,
91414+ set_page_attributes(mod->module_init_rw,
91415+ mod->module_init_rw + mod->init_size_rw,
91416 set_memory_x);
91417- set_page_attributes(mod->module_init,
91418- mod->module_init + mod->init_ro_size,
91419+ set_page_attributes(mod->module_init_rx,
91420+ mod->module_init_rx + mod->init_size_rx,
91421 set_memory_rw);
91422 }
91423
91424@@ -1782,14 +1801,14 @@ void set_all_modules_text_rw(void)
91425 list_for_each_entry_rcu(mod, &modules, list) {
91426 if (mod->state == MODULE_STATE_UNFORMED)
91427 continue;
91428- if ((mod->module_core) && (mod->core_text_size)) {
91429- set_page_attributes(mod->module_core,
91430- mod->module_core + mod->core_text_size,
91431+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91432+ set_page_attributes(mod->module_core_rx,
91433+ mod->module_core_rx + mod->core_size_rx,
91434 set_memory_rw);
91435 }
91436- if ((mod->module_init) && (mod->init_text_size)) {
91437- set_page_attributes(mod->module_init,
91438- mod->module_init + mod->init_text_size,
91439+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91440+ set_page_attributes(mod->module_init_rx,
91441+ mod->module_init_rx + mod->init_size_rx,
91442 set_memory_rw);
91443 }
91444 }
91445@@ -1805,14 +1824,14 @@ void set_all_modules_text_ro(void)
91446 list_for_each_entry_rcu(mod, &modules, list) {
91447 if (mod->state == MODULE_STATE_UNFORMED)
91448 continue;
91449- if ((mod->module_core) && (mod->core_text_size)) {
91450- set_page_attributes(mod->module_core,
91451- mod->module_core + mod->core_text_size,
91452+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91453+ set_page_attributes(mod->module_core_rx,
91454+ mod->module_core_rx + mod->core_size_rx,
91455 set_memory_ro);
91456 }
91457- if ((mod->module_init) && (mod->init_text_size)) {
91458- set_page_attributes(mod->module_init,
91459- mod->module_init + mod->init_text_size,
91460+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91461+ set_page_attributes(mod->module_init_rx,
91462+ mod->module_init_rx + mod->init_size_rx,
91463 set_memory_ro);
91464 }
91465 }
91466@@ -1842,7 +1861,9 @@ static void free_module(struct module *mod)
91467
91468 /* We leave it in list to prevent duplicate loads, but make sure
91469 * that noone uses it while it's being deconstructed. */
91470+ mutex_lock(&module_mutex);
91471 mod->state = MODULE_STATE_UNFORMED;
91472+ mutex_unlock(&module_mutex);
91473
91474 /* Remove dynamic debug info */
91475 ddebug_remove_module(mod->name);
91476@@ -1863,16 +1884,19 @@ static void free_module(struct module *mod)
91477
91478 /* This may be NULL, but that's OK */
91479 unset_module_init_ro_nx(mod);
91480- module_free(mod, mod->module_init);
91481+ module_free(mod, mod->module_init_rw);
91482+ module_free_exec(mod, mod->module_init_rx);
91483 kfree(mod->args);
91484 percpu_modfree(mod);
91485
91486 /* Free lock-classes: */
91487- lockdep_free_key_range(mod->module_core, mod->core_size);
91488+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91489+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91490
91491 /* Finally, free the core (containing the module structure) */
91492 unset_module_core_ro_nx(mod);
91493- module_free(mod, mod->module_core);
91494+ module_free_exec(mod, mod->module_core_rx);
91495+ module_free(mod, mod->module_core_rw);
91496
91497 #ifdef CONFIG_MPU
91498 update_protections(current->mm);
91499@@ -1941,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91500 int ret = 0;
91501 const struct kernel_symbol *ksym;
91502
91503+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91504+ int is_fs_load = 0;
91505+ int register_filesystem_found = 0;
91506+ char *p;
91507+
91508+ p = strstr(mod->args, "grsec_modharden_fs");
91509+ if (p) {
91510+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
91511+ /* copy \0 as well */
91512+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91513+ is_fs_load = 1;
91514+ }
91515+#endif
91516+
91517 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
91518 const char *name = info->strtab + sym[i].st_name;
91519
91520+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91521+ /* it's a real shame this will never get ripped and copied
91522+ upstream! ;(
91523+ */
91524+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91525+ register_filesystem_found = 1;
91526+#endif
91527+
91528 switch (sym[i].st_shndx) {
91529 case SHN_COMMON:
91530 /* Ignore common symbols */
91531@@ -1968,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91532 ksym = resolve_symbol_wait(mod, info, name);
91533 /* Ok if resolved. */
91534 if (ksym && !IS_ERR(ksym)) {
91535+ pax_open_kernel();
91536 sym[i].st_value = ksym->value;
91537+ pax_close_kernel();
91538 break;
91539 }
91540
91541@@ -1987,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91542 secbase = (unsigned long)mod_percpu(mod);
91543 else
91544 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
91545+ pax_open_kernel();
91546 sym[i].st_value += secbase;
91547+ pax_close_kernel();
91548 break;
91549 }
91550 }
91551
91552+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91553+ if (is_fs_load && !register_filesystem_found) {
91554+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
91555+ ret = -EPERM;
91556+ }
91557+#endif
91558+
91559 return ret;
91560 }
91561
91562@@ -2075,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
91563 || s->sh_entsize != ~0UL
91564 || strstarts(sname, ".init"))
91565 continue;
91566- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
91567+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91568+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
91569+ else
91570+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
91571 pr_debug("\t%s\n", sname);
91572 }
91573- switch (m) {
91574- case 0: /* executable */
91575- mod->core_size = debug_align(mod->core_size);
91576- mod->core_text_size = mod->core_size;
91577- break;
91578- case 1: /* RO: text and ro-data */
91579- mod->core_size = debug_align(mod->core_size);
91580- mod->core_ro_size = mod->core_size;
91581- break;
91582- case 3: /* whole core */
91583- mod->core_size = debug_align(mod->core_size);
91584- break;
91585- }
91586 }
91587
91588 pr_debug("Init section allocation order:\n");
91589@@ -2104,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
91590 || s->sh_entsize != ~0UL
91591 || !strstarts(sname, ".init"))
91592 continue;
91593- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
91594- | INIT_OFFSET_MASK);
91595+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91596+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
91597+ else
91598+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
91599+ s->sh_entsize |= INIT_OFFSET_MASK;
91600 pr_debug("\t%s\n", sname);
91601 }
91602- switch (m) {
91603- case 0: /* executable */
91604- mod->init_size = debug_align(mod->init_size);
91605- mod->init_text_size = mod->init_size;
91606- break;
91607- case 1: /* RO: text and ro-data */
91608- mod->init_size = debug_align(mod->init_size);
91609- mod->init_ro_size = mod->init_size;
91610- break;
91611- case 3: /* whole init */
91612- mod->init_size = debug_align(mod->init_size);
91613- break;
91614- }
91615 }
91616 }
91617
91618@@ -2293,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91619
91620 /* Put symbol section at end of init part of module. */
91621 symsect->sh_flags |= SHF_ALLOC;
91622- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
91623+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
91624 info->index.sym) | INIT_OFFSET_MASK;
91625 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
91626
91627@@ -2310,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91628 }
91629
91630 /* Append room for core symbols at end of core part. */
91631- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
91632- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
91633- mod->core_size += strtab_size;
91634+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
91635+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
91636+ mod->core_size_rx += strtab_size;
91637
91638 /* Put string table section at end of init part of module. */
91639 strsect->sh_flags |= SHF_ALLOC;
91640- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
91641+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
91642 info->index.str) | INIT_OFFSET_MASK;
91643 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
91644 }
91645@@ -2334,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91646 /* Make sure we get permanent strtab: don't use info->strtab. */
91647 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
91648
91649+ pax_open_kernel();
91650+
91651 /* Set types up while we still have access to sections. */
91652 for (i = 0; i < mod->num_symtab; i++)
91653 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
91654
91655- mod->core_symtab = dst = mod->module_core + info->symoffs;
91656- mod->core_strtab = s = mod->module_core + info->stroffs;
91657+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
91658+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
91659 src = mod->symtab;
91660 for (ndst = i = 0; i < mod->num_symtab; i++) {
91661 if (i == 0 ||
91662@@ -2351,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91663 }
91664 }
91665 mod->core_num_syms = ndst;
91666+
91667+ pax_close_kernel();
91668 }
91669 #else
91670 static inline void layout_symtab(struct module *mod, struct load_info *info)
91671@@ -2384,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
91672 return vmalloc_exec(size);
91673 }
91674
91675-static void *module_alloc_update_bounds(unsigned long size)
91676+static void *module_alloc_update_bounds_rw(unsigned long size)
91677 {
91678 void *ret = module_alloc(size);
91679
91680 if (ret) {
91681 mutex_lock(&module_mutex);
91682 /* Update module bounds. */
91683- if ((unsigned long)ret < module_addr_min)
91684- module_addr_min = (unsigned long)ret;
91685- if ((unsigned long)ret + size > module_addr_max)
91686- module_addr_max = (unsigned long)ret + size;
91687+ if ((unsigned long)ret < module_addr_min_rw)
91688+ module_addr_min_rw = (unsigned long)ret;
91689+ if ((unsigned long)ret + size > module_addr_max_rw)
91690+ module_addr_max_rw = (unsigned long)ret + size;
91691+ mutex_unlock(&module_mutex);
91692+ }
91693+ return ret;
91694+}
91695+
91696+static void *module_alloc_update_bounds_rx(unsigned long size)
91697+{
91698+ void *ret = module_alloc_exec(size);
91699+
91700+ if (ret) {
91701+ mutex_lock(&module_mutex);
91702+ /* Update module bounds. */
91703+ if ((unsigned long)ret < module_addr_min_rx)
91704+ module_addr_min_rx = (unsigned long)ret;
91705+ if ((unsigned long)ret + size > module_addr_max_rx)
91706+ module_addr_max_rx = (unsigned long)ret + size;
91707 mutex_unlock(&module_mutex);
91708 }
91709 return ret;
91710@@ -2648,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91711 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
91712
91713 if (info->index.sym == 0) {
91714+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91715+ /*
91716+ * avoid potentially printing jibberish on attempted load
91717+ * of a module randomized with a different seed
91718+ */
91719+ pr_warn("module has no symbols (stripped?)\n");
91720+#else
91721 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
91722+#endif
91723 return ERR_PTR(-ENOEXEC);
91724 }
91725
91726@@ -2664,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91727 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91728 {
91729 const char *modmagic = get_modinfo(info, "vermagic");
91730+ const char *license = get_modinfo(info, "license");
91731 int err;
91732
91733+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
91734+ if (!license || !license_is_gpl_compatible(license))
91735+ return -ENOEXEC;
91736+#endif
91737+
91738 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
91739 modmagic = NULL;
91740
91741@@ -2690,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91742 }
91743
91744 /* Set up license info based on the info section */
91745- set_license(mod, get_modinfo(info, "license"));
91746+ set_license(mod, license);
91747
91748 return 0;
91749 }
91750@@ -2784,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
91751 void *ptr;
91752
91753 /* Do the allocs. */
91754- ptr = module_alloc_update_bounds(mod->core_size);
91755+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91756 /*
91757 * The pointer to this block is stored in the module structure
91758 * which is inside the block. Just mark it as not being a
91759@@ -2794,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
91760 if (!ptr)
91761 return -ENOMEM;
91762
91763- memset(ptr, 0, mod->core_size);
91764- mod->module_core = ptr;
91765+ memset(ptr, 0, mod->core_size_rw);
91766+ mod->module_core_rw = ptr;
91767
91768- if (mod->init_size) {
91769- ptr = module_alloc_update_bounds(mod->init_size);
91770+ if (mod->init_size_rw) {
91771+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91772 /*
91773 * The pointer to this block is stored in the module structure
91774 * which is inside the block. This block doesn't need to be
91775@@ -2807,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
91776 */
91777 kmemleak_ignore(ptr);
91778 if (!ptr) {
91779- module_free(mod, mod->module_core);
91780+ module_free(mod, mod->module_core_rw);
91781 return -ENOMEM;
91782 }
91783- memset(ptr, 0, mod->init_size);
91784- mod->module_init = ptr;
91785+ memset(ptr, 0, mod->init_size_rw);
91786+ mod->module_init_rw = ptr;
91787 } else
91788- mod->module_init = NULL;
91789+ mod->module_init_rw = NULL;
91790+
91791+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91792+ kmemleak_not_leak(ptr);
91793+ if (!ptr) {
91794+ if (mod->module_init_rw)
91795+ module_free(mod, mod->module_init_rw);
91796+ module_free(mod, mod->module_core_rw);
91797+ return -ENOMEM;
91798+ }
91799+
91800+ pax_open_kernel();
91801+ memset(ptr, 0, mod->core_size_rx);
91802+ pax_close_kernel();
91803+ mod->module_core_rx = ptr;
91804+
91805+ if (mod->init_size_rx) {
91806+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91807+ kmemleak_ignore(ptr);
91808+ if (!ptr && mod->init_size_rx) {
91809+ module_free_exec(mod, mod->module_core_rx);
91810+ if (mod->module_init_rw)
91811+ module_free(mod, mod->module_init_rw);
91812+ module_free(mod, mod->module_core_rw);
91813+ return -ENOMEM;
91814+ }
91815+
91816+ pax_open_kernel();
91817+ memset(ptr, 0, mod->init_size_rx);
91818+ pax_close_kernel();
91819+ mod->module_init_rx = ptr;
91820+ } else
91821+ mod->module_init_rx = NULL;
91822
91823 /* Transfer each section which specifies SHF_ALLOC */
91824 pr_debug("final section addresses:\n");
91825@@ -2824,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
91826 if (!(shdr->sh_flags & SHF_ALLOC))
91827 continue;
91828
91829- if (shdr->sh_entsize & INIT_OFFSET_MASK)
91830- dest = mod->module_init
91831- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91832- else
91833- dest = mod->module_core + shdr->sh_entsize;
91834+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
91835+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91836+ dest = mod->module_init_rw
91837+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91838+ else
91839+ dest = mod->module_init_rx
91840+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91841+ } else {
91842+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91843+ dest = mod->module_core_rw + shdr->sh_entsize;
91844+ else
91845+ dest = mod->module_core_rx + shdr->sh_entsize;
91846+ }
91847+
91848+ if (shdr->sh_type != SHT_NOBITS) {
91849+
91850+#ifdef CONFIG_PAX_KERNEXEC
91851+#ifdef CONFIG_X86_64
91852+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91853+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91854+#endif
91855+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91856+ pax_open_kernel();
91857+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91858+ pax_close_kernel();
91859+ } else
91860+#endif
91861
91862- if (shdr->sh_type != SHT_NOBITS)
91863 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91864+ }
91865 /* Update sh_addr to point to copy in image. */
91866- shdr->sh_addr = (unsigned long)dest;
91867+
91868+#ifdef CONFIG_PAX_KERNEXEC
91869+ if (shdr->sh_flags & SHF_EXECINSTR)
91870+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91871+ else
91872+#endif
91873+
91874+ shdr->sh_addr = (unsigned long)dest;
91875 pr_debug("\t0x%lx %s\n",
91876 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91877 }
91878@@ -2890,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
91879 * Do it before processing of module parameters, so the module
91880 * can provide parameter accessor functions of its own.
91881 */
91882- if (mod->module_init)
91883- flush_icache_range((unsigned long)mod->module_init,
91884- (unsigned long)mod->module_init
91885- + mod->init_size);
91886- flush_icache_range((unsigned long)mod->module_core,
91887- (unsigned long)mod->module_core + mod->core_size);
91888+ if (mod->module_init_rx)
91889+ flush_icache_range((unsigned long)mod->module_init_rx,
91890+ (unsigned long)mod->module_init_rx
91891+ + mod->init_size_rx);
91892+ flush_icache_range((unsigned long)mod->module_core_rx,
91893+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91894
91895 set_fs(old_fs);
91896 }
91897@@ -2952,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
91898 static void module_deallocate(struct module *mod, struct load_info *info)
91899 {
91900 percpu_modfree(mod);
91901- module_free(mod, mod->module_init);
91902- module_free(mod, mod->module_core);
91903+ module_free_exec(mod, mod->module_init_rx);
91904+ module_free_exec(mod, mod->module_core_rx);
91905+ module_free(mod, mod->module_init_rw);
91906+ module_free(mod, mod->module_core_rw);
91907 }
91908
91909 int __weak module_finalize(const Elf_Ehdr *hdr,
91910@@ -2966,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91911 static int post_relocation(struct module *mod, const struct load_info *info)
91912 {
91913 /* Sort exception table now relocations are done. */
91914+ pax_open_kernel();
91915 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91916+ pax_close_kernel();
91917
91918 /* Copy relocated percpu area over. */
91919 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91920@@ -3075,11 +3211,12 @@ static int do_init_module(struct module *mod)
91921 mod->strtab = mod->core_strtab;
91922 #endif
91923 unset_module_init_ro_nx(mod);
91924- module_free(mod, mod->module_init);
91925- mod->module_init = NULL;
91926- mod->init_size = 0;
91927- mod->init_ro_size = 0;
91928- mod->init_text_size = 0;
91929+ module_free(mod, mod->module_init_rw);
91930+ module_free_exec(mod, mod->module_init_rx);
91931+ mod->module_init_rw = NULL;
91932+ mod->module_init_rx = NULL;
91933+ mod->init_size_rw = 0;
91934+ mod->init_size_rx = 0;
91935 mutex_unlock(&module_mutex);
91936 wake_up_all(&module_wq);
91937
91938@@ -3147,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91939 module_bug_finalize(info->hdr, info->sechdrs, mod);
91940
91941 /* Set RO and NX regions for core */
91942- set_section_ro_nx(mod->module_core,
91943- mod->core_text_size,
91944- mod->core_ro_size,
91945- mod->core_size);
91946+ set_section_ro_nx(mod->module_core_rx,
91947+ mod->core_size_rx,
91948+ mod->core_size_rx,
91949+ mod->core_size_rx);
91950
91951 /* Set RO and NX regions for init */
91952- set_section_ro_nx(mod->module_init,
91953- mod->init_text_size,
91954- mod->init_ro_size,
91955- mod->init_size);
91956+ set_section_ro_nx(mod->module_init_rx,
91957+ mod->init_size_rx,
91958+ mod->init_size_rx,
91959+ mod->init_size_rx);
91960
91961 /* Mark state as coming so strong_try_module_get() ignores us,
91962 * but kallsyms etc. can see us. */
91963@@ -3240,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91964 if (err)
91965 goto free_unload;
91966
91967+ /* Now copy in args */
91968+ mod->args = strndup_user(uargs, ~0UL >> 1);
91969+ if (IS_ERR(mod->args)) {
91970+ err = PTR_ERR(mod->args);
91971+ goto free_unload;
91972+ }
91973+
91974 /* Set up MODINFO_ATTR fields */
91975 setup_modinfo(mod, info);
91976
91977+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91978+ {
91979+ char *p, *p2;
91980+
91981+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91982+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91983+ err = -EPERM;
91984+ goto free_modinfo;
91985+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91986+ p += sizeof("grsec_modharden_normal") - 1;
91987+ p2 = strstr(p, "_");
91988+ if (p2) {
91989+ *p2 = '\0';
91990+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91991+ *p2 = '_';
91992+ }
91993+ err = -EPERM;
91994+ goto free_modinfo;
91995+ }
91996+ }
91997+#endif
91998+
91999 /* Fix up syms, so that st_value is a pointer to location. */
92000 err = simplify_symbols(mod, info);
92001 if (err < 0)
92002@@ -3258,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92003
92004 flush_module_icache(mod);
92005
92006- /* Now copy in args */
92007- mod->args = strndup_user(uargs, ~0UL >> 1);
92008- if (IS_ERR(mod->args)) {
92009- err = PTR_ERR(mod->args);
92010- goto free_arch_cleanup;
92011- }
92012-
92013 dynamic_debug_setup(info->debug, info->num_debug);
92014
92015 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92016@@ -3312,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92017 ddebug_cleanup:
92018 dynamic_debug_remove(info->debug);
92019 synchronize_sched();
92020- kfree(mod->args);
92021- free_arch_cleanup:
92022 module_arch_cleanup(mod);
92023 free_modinfo:
92024 free_modinfo(mod);
92025+ kfree(mod->args);
92026 free_unload:
92027 module_unload_free(mod);
92028 unlink_mod:
92029@@ -3401,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
92030 unsigned long nextval;
92031
92032 /* At worse, next value is at end of module */
92033- if (within_module_init(addr, mod))
92034- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92035+ if (within_module_init_rx(addr, mod))
92036+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92037+ else if (within_module_init_rw(addr, mod))
92038+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92039+ else if (within_module_core_rx(addr, mod))
92040+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92041+ else if (within_module_core_rw(addr, mod))
92042+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92043 else
92044- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92045+ return NULL;
92046
92047 /* Scan for closest preceding symbol, and next symbol. (ELF
92048 starts real symbols at 1). */
92049@@ -3652,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
92050 return 0;
92051
92052 seq_printf(m, "%s %u",
92053- mod->name, mod->init_size + mod->core_size);
92054+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92055 print_unload_info(m, mod);
92056
92057 /* Informative for users. */
92058@@ -3661,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
92059 mod->state == MODULE_STATE_COMING ? "Loading":
92060 "Live");
92061 /* Used by oprofile and other similar tools. */
92062- seq_printf(m, " 0x%pK", mod->module_core);
92063+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92064
92065 /* Taints info */
92066 if (mod->taints)
92067@@ -3697,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
92068
92069 static int __init proc_modules_init(void)
92070 {
92071+#ifndef CONFIG_GRKERNSEC_HIDESYM
92072+#ifdef CONFIG_GRKERNSEC_PROC_USER
92073+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92074+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92075+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92076+#else
92077 proc_create("modules", 0, NULL, &proc_modules_operations);
92078+#endif
92079+#else
92080+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92081+#endif
92082 return 0;
92083 }
92084 module_init(proc_modules_init);
92085@@ -3758,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
92086 {
92087 struct module *mod;
92088
92089- if (addr < module_addr_min || addr > module_addr_max)
92090+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92091+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92092 return NULL;
92093
92094 list_for_each_entry_rcu(mod, &modules, list) {
92095@@ -3799,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
92096 */
92097 struct module *__module_text_address(unsigned long addr)
92098 {
92099- struct module *mod = __module_address(addr);
92100+ struct module *mod;
92101+
92102+#ifdef CONFIG_X86_32
92103+ addr = ktla_ktva(addr);
92104+#endif
92105+
92106+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92107+ return NULL;
92108+
92109+ mod = __module_address(addr);
92110+
92111 if (mod) {
92112 /* Make sure it's within the text section. */
92113- if (!within(addr, mod->module_init, mod->init_text_size)
92114- && !within(addr, mod->module_core, mod->core_text_size))
92115+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92116 mod = NULL;
92117 }
92118 return mod;
92119diff --git a/kernel/notifier.c b/kernel/notifier.c
92120index 4803da6..1c5eea6 100644
92121--- a/kernel/notifier.c
92122+++ b/kernel/notifier.c
92123@@ -5,6 +5,7 @@
92124 #include <linux/rcupdate.h>
92125 #include <linux/vmalloc.h>
92126 #include <linux/reboot.h>
92127+#include <linux/mm.h>
92128
92129 /*
92130 * Notifier list for kernel code which wants to be called
92131@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92132 while ((*nl) != NULL) {
92133 if (n->priority > (*nl)->priority)
92134 break;
92135- nl = &((*nl)->next);
92136+ nl = (struct notifier_block **)&((*nl)->next);
92137 }
92138- n->next = *nl;
92139+ pax_open_kernel();
92140+ *(const void **)&n->next = *nl;
92141 rcu_assign_pointer(*nl, n);
92142+ pax_close_kernel();
92143 return 0;
92144 }
92145
92146@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92147 return 0;
92148 if (n->priority > (*nl)->priority)
92149 break;
92150- nl = &((*nl)->next);
92151+ nl = (struct notifier_block **)&((*nl)->next);
92152 }
92153- n->next = *nl;
92154+ pax_open_kernel();
92155+ *(const void **)&n->next = *nl;
92156 rcu_assign_pointer(*nl, n);
92157+ pax_close_kernel();
92158 return 0;
92159 }
92160
92161@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92162 {
92163 while ((*nl) != NULL) {
92164 if ((*nl) == n) {
92165+ pax_open_kernel();
92166 rcu_assign_pointer(*nl, n->next);
92167+ pax_close_kernel();
92168 return 0;
92169 }
92170- nl = &((*nl)->next);
92171+ nl = (struct notifier_block **)&((*nl)->next);
92172 }
92173 return -ENOENT;
92174 }
92175diff --git a/kernel/padata.c b/kernel/padata.c
92176index 161402f..598814c 100644
92177--- a/kernel/padata.c
92178+++ b/kernel/padata.c
92179@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92180 * seq_nr mod. number of cpus in use.
92181 */
92182
92183- seq_nr = atomic_inc_return(&pd->seq_nr);
92184+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92185 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92186
92187 return padata_index_to_cpu(pd, cpu_index);
92188@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92189 padata_init_pqueues(pd);
92190 padata_init_squeues(pd);
92191 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92192- atomic_set(&pd->seq_nr, -1);
92193+ atomic_set_unchecked(&pd->seq_nr, -1);
92194 atomic_set(&pd->reorder_objects, 0);
92195 atomic_set(&pd->refcnt, 0);
92196 pd->pinst = pinst;
92197diff --git a/kernel/panic.c b/kernel/panic.c
92198index d09dc5c..9abbdff 100644
92199--- a/kernel/panic.c
92200+++ b/kernel/panic.c
92201@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
92202 /*
92203 * Stop ourself in panic -- architecture code may override this
92204 */
92205-void __weak panic_smp_self_stop(void)
92206+void __weak __noreturn panic_smp_self_stop(void)
92207 {
92208 while (1)
92209 cpu_relax();
92210@@ -421,7 +421,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92211 disable_trace_on_warning();
92212
92213 pr_warn("------------[ cut here ]------------\n");
92214- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92215+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92216 raw_smp_processor_id(), current->pid, file, line, caller);
92217
92218 if (args)
92219@@ -475,7 +475,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92220 */
92221 __visible void __stack_chk_fail(void)
92222 {
92223- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92224+ dump_stack();
92225+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92226 __builtin_return_address(0));
92227 }
92228 EXPORT_SYMBOL(__stack_chk_fail);
92229diff --git a/kernel/pid.c b/kernel/pid.c
92230index 9b9a266..c20ef80 100644
92231--- a/kernel/pid.c
92232+++ b/kernel/pid.c
92233@@ -33,6 +33,7 @@
92234 #include <linux/rculist.h>
92235 #include <linux/bootmem.h>
92236 #include <linux/hash.h>
92237+#include <linux/security.h>
92238 #include <linux/pid_namespace.h>
92239 #include <linux/init_task.h>
92240 #include <linux/syscalls.h>
92241@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92242
92243 int pid_max = PID_MAX_DEFAULT;
92244
92245-#define RESERVED_PIDS 300
92246+#define RESERVED_PIDS 500
92247
92248 int pid_max_min = RESERVED_PIDS + 1;
92249 int pid_max_max = PID_MAX_LIMIT;
92250@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
92251 */
92252 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92253 {
92254+ struct task_struct *task;
92255+
92256 rcu_lockdep_assert(rcu_read_lock_held(),
92257 "find_task_by_pid_ns() needs rcu_read_lock()"
92258 " protection");
92259- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92260+
92261+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92262+
92263+ if (gr_pid_is_chrooted(task))
92264+ return NULL;
92265+
92266+ return task;
92267 }
92268
92269 struct task_struct *find_task_by_vpid(pid_t vnr)
92270@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92271 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
92272 }
92273
92274+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92275+{
92276+ rcu_lockdep_assert(rcu_read_lock_held(),
92277+ "find_task_by_pid_ns() needs rcu_read_lock()"
92278+ " protection");
92279+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
92280+}
92281+
92282 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
92283 {
92284 struct pid *pid;
92285diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
92286index db95d8e..a0ca23f 100644
92287--- a/kernel/pid_namespace.c
92288+++ b/kernel/pid_namespace.c
92289@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
92290 void __user *buffer, size_t *lenp, loff_t *ppos)
92291 {
92292 struct pid_namespace *pid_ns = task_active_pid_ns(current);
92293- struct ctl_table tmp = *table;
92294+ ctl_table_no_const tmp = *table;
92295
92296 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
92297 return -EPERM;
92298diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
92299index e4e4121..71faf14 100644
92300--- a/kernel/power/Kconfig
92301+++ b/kernel/power/Kconfig
92302@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
92303 config HIBERNATION
92304 bool "Hibernation (aka 'suspend to disk')"
92305 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
92306+ depends on !GRKERNSEC_KMEM
92307+ depends on !PAX_MEMORY_SANITIZE
92308 select HIBERNATE_CALLBACKS
92309 select LZO_COMPRESS
92310 select LZO_DECOMPRESS
92311diff --git a/kernel/power/process.c b/kernel/power/process.c
92312index 4ee194e..925778f 100644
92313--- a/kernel/power/process.c
92314+++ b/kernel/power/process.c
92315@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
92316 unsigned int elapsed_msecs;
92317 bool wakeup = false;
92318 int sleep_usecs = USEC_PER_MSEC;
92319+ bool timedout = false;
92320
92321 do_gettimeofday(&start);
92322
92323@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
92324
92325 while (true) {
92326 todo = 0;
92327+ if (time_after(jiffies, end_time))
92328+ timedout = true;
92329 read_lock(&tasklist_lock);
92330 do_each_thread(g, p) {
92331 if (p == current || !freeze_task(p))
92332 continue;
92333
92334- if (!freezer_should_skip(p))
92335+ if (!freezer_should_skip(p)) {
92336 todo++;
92337+ if (timedout) {
92338+ printk(KERN_ERR "Task refusing to freeze:\n");
92339+ sched_show_task(p);
92340+ }
92341+ }
92342 } while_each_thread(g, p);
92343 read_unlock(&tasklist_lock);
92344
92345@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
92346 todo += wq_busy;
92347 }
92348
92349- if (!todo || time_after(jiffies, end_time))
92350+ if (!todo || timedout)
92351 break;
92352
92353 if (pm_wakeup_pending()) {
92354diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
92355index 1ce7706..3b07c49 100644
92356--- a/kernel/printk/printk.c
92357+++ b/kernel/printk/printk.c
92358@@ -490,6 +490,11 @@ static int check_syslog_permissions(int type, bool from_file)
92359 if (from_file && type != SYSLOG_ACTION_OPEN)
92360 return 0;
92361
92362+#ifdef CONFIG_GRKERNSEC_DMESG
92363+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
92364+ return -EPERM;
92365+#endif
92366+
92367 if (syslog_action_restricted(type)) {
92368 if (capable(CAP_SYSLOG))
92369 return 0;
92370diff --git a/kernel/profile.c b/kernel/profile.c
92371index 54bf5ba..df6e0a2 100644
92372--- a/kernel/profile.c
92373+++ b/kernel/profile.c
92374@@ -37,7 +37,7 @@ struct profile_hit {
92375 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
92376 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
92377
92378-static atomic_t *prof_buffer;
92379+static atomic_unchecked_t *prof_buffer;
92380 static unsigned long prof_len, prof_shift;
92381
92382 int prof_on __read_mostly;
92383@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
92384 hits[i].pc = 0;
92385 continue;
92386 }
92387- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92388+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92389 hits[i].hits = hits[i].pc = 0;
92390 }
92391 }
92392@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92393 * Add the current hit(s) and flush the write-queue out
92394 * to the global buffer:
92395 */
92396- atomic_add(nr_hits, &prof_buffer[pc]);
92397+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
92398 for (i = 0; i < NR_PROFILE_HIT; ++i) {
92399- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92400+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92401 hits[i].pc = hits[i].hits = 0;
92402 }
92403 out:
92404@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92405 {
92406 unsigned long pc;
92407 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
92408- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92409+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92410 }
92411 #endif /* !CONFIG_SMP */
92412
92413@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
92414 return -EFAULT;
92415 buf++; p++; count--; read++;
92416 }
92417- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
92418+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
92419 if (copy_to_user(buf, (void *)pnt, count))
92420 return -EFAULT;
92421 read += count;
92422@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
92423 }
92424 #endif
92425 profile_discard_flip_buffers();
92426- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
92427+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
92428 return count;
92429 }
92430
92431diff --git a/kernel/ptrace.c b/kernel/ptrace.c
92432index 54e7522..5b82dd6 100644
92433--- a/kernel/ptrace.c
92434+++ b/kernel/ptrace.c
92435@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
92436 if (seize)
92437 flags |= PT_SEIZED;
92438 rcu_read_lock();
92439- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92440+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92441 flags |= PT_PTRACE_CAP;
92442 rcu_read_unlock();
92443 task->ptrace = flags;
92444@@ -532,7 +532,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
92445 break;
92446 return -EIO;
92447 }
92448- if (copy_to_user(dst, buf, retval))
92449+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
92450 return -EFAULT;
92451 copied += retval;
92452 src += retval;
92453@@ -800,7 +800,7 @@ int ptrace_request(struct task_struct *child, long request,
92454 bool seized = child->ptrace & PT_SEIZED;
92455 int ret = -EIO;
92456 siginfo_t siginfo, *si;
92457- void __user *datavp = (void __user *) data;
92458+ void __user *datavp = (__force void __user *) data;
92459 unsigned long __user *datalp = datavp;
92460 unsigned long flags;
92461
92462@@ -1046,14 +1046,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
92463 goto out;
92464 }
92465
92466+ if (gr_handle_ptrace(child, request)) {
92467+ ret = -EPERM;
92468+ goto out_put_task_struct;
92469+ }
92470+
92471 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92472 ret = ptrace_attach(child, request, addr, data);
92473 /*
92474 * Some architectures need to do book-keeping after
92475 * a ptrace attach.
92476 */
92477- if (!ret)
92478+ if (!ret) {
92479 arch_ptrace_attach(child);
92480+ gr_audit_ptrace(child);
92481+ }
92482 goto out_put_task_struct;
92483 }
92484
92485@@ -1081,7 +1088,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
92486 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
92487 if (copied != sizeof(tmp))
92488 return -EIO;
92489- return put_user(tmp, (unsigned long __user *)data);
92490+ return put_user(tmp, (__force unsigned long __user *)data);
92491 }
92492
92493 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
92494@@ -1175,7 +1182,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
92495 }
92496
92497 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92498- compat_long_t, addr, compat_long_t, data)
92499+ compat_ulong_t, addr, compat_ulong_t, data)
92500 {
92501 struct task_struct *child;
92502 long ret;
92503@@ -1191,14 +1198,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92504 goto out;
92505 }
92506
92507+ if (gr_handle_ptrace(child, request)) {
92508+ ret = -EPERM;
92509+ goto out_put_task_struct;
92510+ }
92511+
92512 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92513 ret = ptrace_attach(child, request, addr, data);
92514 /*
92515 * Some architectures need to do book-keeping after
92516 * a ptrace attach.
92517 */
92518- if (!ret)
92519+ if (!ret) {
92520 arch_ptrace_attach(child);
92521+ gr_audit_ptrace(child);
92522+ }
92523 goto out_put_task_struct;
92524 }
92525
92526diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
92527index 948a769..5ca842b 100644
92528--- a/kernel/rcu/rcutorture.c
92529+++ b/kernel/rcu/rcutorture.c
92530@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92531 rcu_torture_count) = { 0 };
92532 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92533 rcu_torture_batch) = { 0 };
92534-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92535-static atomic_t n_rcu_torture_alloc;
92536-static atomic_t n_rcu_torture_alloc_fail;
92537-static atomic_t n_rcu_torture_free;
92538-static atomic_t n_rcu_torture_mberror;
92539-static atomic_t n_rcu_torture_error;
92540+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92541+static atomic_unchecked_t n_rcu_torture_alloc;
92542+static atomic_unchecked_t n_rcu_torture_alloc_fail;
92543+static atomic_unchecked_t n_rcu_torture_free;
92544+static atomic_unchecked_t n_rcu_torture_mberror;
92545+static atomic_unchecked_t n_rcu_torture_error;
92546 static long n_rcu_torture_barrier_error;
92547 static long n_rcu_torture_boost_ktrerror;
92548 static long n_rcu_torture_boost_rterror;
92549@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
92550
92551 spin_lock_bh(&rcu_torture_lock);
92552 if (list_empty(&rcu_torture_freelist)) {
92553- atomic_inc(&n_rcu_torture_alloc_fail);
92554+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
92555 spin_unlock_bh(&rcu_torture_lock);
92556 return NULL;
92557 }
92558- atomic_inc(&n_rcu_torture_alloc);
92559+ atomic_inc_unchecked(&n_rcu_torture_alloc);
92560 p = rcu_torture_freelist.next;
92561 list_del_init(p);
92562 spin_unlock_bh(&rcu_torture_lock);
92563@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
92564 static void
92565 rcu_torture_free(struct rcu_torture *p)
92566 {
92567- atomic_inc(&n_rcu_torture_free);
92568+ atomic_inc_unchecked(&n_rcu_torture_free);
92569 spin_lock_bh(&rcu_torture_lock);
92570 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
92571 spin_unlock_bh(&rcu_torture_lock);
92572@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
92573 i = rp->rtort_pipe_count;
92574 if (i > RCU_TORTURE_PIPE_LEN)
92575 i = RCU_TORTURE_PIPE_LEN;
92576- atomic_inc(&rcu_torture_wcount[i]);
92577+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92578 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
92579 rp->rtort_mbtest = 0;
92580 return true;
92581@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
92582 i = old_rp->rtort_pipe_count;
92583 if (i > RCU_TORTURE_PIPE_LEN)
92584 i = RCU_TORTURE_PIPE_LEN;
92585- atomic_inc(&rcu_torture_wcount[i]);
92586+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92587 old_rp->rtort_pipe_count++;
92588 switch (synctype[torture_random(&rand) % nsynctypes]) {
92589 case RTWS_DEF_FREE:
92590@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
92591 return;
92592 }
92593 if (p->rtort_mbtest == 0)
92594- atomic_inc(&n_rcu_torture_mberror);
92595+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92596 spin_lock(&rand_lock);
92597 cur_ops->read_delay(&rand);
92598 n_rcu_torture_timers++;
92599@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
92600 continue;
92601 }
92602 if (p->rtort_mbtest == 0)
92603- atomic_inc(&n_rcu_torture_mberror);
92604+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92605 cur_ops->read_delay(&rand);
92606 preempt_disable();
92607 pipe_count = p->rtort_pipe_count;
92608@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
92609 }
92610 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
92611 page += sprintf(page,
92612- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
92613+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
92614 rcu_torture_current,
92615 rcu_torture_current_version,
92616 list_empty(&rcu_torture_freelist),
92617- atomic_read(&n_rcu_torture_alloc),
92618- atomic_read(&n_rcu_torture_alloc_fail),
92619- atomic_read(&n_rcu_torture_free));
92620+ atomic_read_unchecked(&n_rcu_torture_alloc),
92621+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
92622+ atomic_read_unchecked(&n_rcu_torture_free));
92623 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
92624- atomic_read(&n_rcu_torture_mberror),
92625+ atomic_read_unchecked(&n_rcu_torture_mberror),
92626 n_rcu_torture_boost_ktrerror,
92627 n_rcu_torture_boost_rterror);
92628 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
92629@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
92630 n_barrier_attempts,
92631 n_rcu_torture_barrier_error);
92632 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
92633- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
92634+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
92635 n_rcu_torture_barrier_error != 0 ||
92636 n_rcu_torture_boost_ktrerror != 0 ||
92637 n_rcu_torture_boost_rterror != 0 ||
92638 n_rcu_torture_boost_failure != 0 ||
92639 i > 1) {
92640 page += sprintf(page, "!!! ");
92641- atomic_inc(&n_rcu_torture_error);
92642+ atomic_inc_unchecked(&n_rcu_torture_error);
92643 WARN_ON_ONCE(1);
92644 }
92645 page += sprintf(page, "Reader Pipe: ");
92646@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
92647 page += sprintf(page, "Free-Block Circulation: ");
92648 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92649 page += sprintf(page, " %d",
92650- atomic_read(&rcu_torture_wcount[i]));
92651+ atomic_read_unchecked(&rcu_torture_wcount[i]));
92652 }
92653 page += sprintf(page, "\n");
92654 if (cur_ops->stats)
92655@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
92656
92657 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
92658
92659- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92660+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92661 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
92662 else if (torture_onoff_failures())
92663 rcu_torture_print_module_parms(cur_ops,
92664@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
92665
92666 rcu_torture_current = NULL;
92667 rcu_torture_current_version = 0;
92668- atomic_set(&n_rcu_torture_alloc, 0);
92669- atomic_set(&n_rcu_torture_alloc_fail, 0);
92670- atomic_set(&n_rcu_torture_free, 0);
92671- atomic_set(&n_rcu_torture_mberror, 0);
92672- atomic_set(&n_rcu_torture_error, 0);
92673+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
92674+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
92675+ atomic_set_unchecked(&n_rcu_torture_free, 0);
92676+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
92677+ atomic_set_unchecked(&n_rcu_torture_error, 0);
92678 n_rcu_torture_barrier_error = 0;
92679 n_rcu_torture_boost_ktrerror = 0;
92680 n_rcu_torture_boost_rterror = 0;
92681 n_rcu_torture_boost_failure = 0;
92682 n_rcu_torture_boosts = 0;
92683 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
92684- atomic_set(&rcu_torture_wcount[i], 0);
92685+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
92686 for_each_possible_cpu(cpu) {
92687 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92688 per_cpu(rcu_torture_count, cpu)[i] = 0;
92689diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
92690index d9efcc1..ea543e9 100644
92691--- a/kernel/rcu/tiny.c
92692+++ b/kernel/rcu/tiny.c
92693@@ -42,7 +42,7 @@
92694 /* Forward declarations for tiny_plugin.h. */
92695 struct rcu_ctrlblk;
92696 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
92697-static void rcu_process_callbacks(struct softirq_action *unused);
92698+static void rcu_process_callbacks(void);
92699 static void __call_rcu(struct rcu_head *head,
92700 void (*func)(struct rcu_head *rcu),
92701 struct rcu_ctrlblk *rcp);
92702@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
92703 false));
92704 }
92705
92706-static void rcu_process_callbacks(struct softirq_action *unused)
92707+static __latent_entropy void rcu_process_callbacks(void)
92708 {
92709 __rcu_process_callbacks(&rcu_sched_ctrlblk);
92710 __rcu_process_callbacks(&rcu_bh_ctrlblk);
92711diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
92712index 858c565..7efd915 100644
92713--- a/kernel/rcu/tiny_plugin.h
92714+++ b/kernel/rcu/tiny_plugin.h
92715@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
92716 dump_stack();
92717 }
92718 if (*rcp->curtail && ULONG_CMP_GE(j, js))
92719- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
92720+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
92721 3 * rcu_jiffies_till_stall_check() + 3;
92722 else if (ULONG_CMP_GE(j, js))
92723- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92724+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92725 }
92726
92727 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
92728 {
92729 rcp->ticks_this_gp = 0;
92730 rcp->gp_start = jiffies;
92731- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92732+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92733 }
92734
92735 static void check_cpu_stalls(void)
92736diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
92737index 1b70cb6..ea62b0a 100644
92738--- a/kernel/rcu/tree.c
92739+++ b/kernel/rcu/tree.c
92740@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
92741 */
92742 rdtp = this_cpu_ptr(&rcu_dynticks);
92743 smp_mb__before_atomic(); /* Earlier stuff before QS. */
92744- atomic_add(2, &rdtp->dynticks); /* QS. */
92745+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
92746 smp_mb__after_atomic(); /* Later stuff after QS. */
92747 break;
92748 }
92749@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
92750 rcu_prepare_for_idle(smp_processor_id());
92751 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92752 smp_mb__before_atomic(); /* See above. */
92753- atomic_inc(&rdtp->dynticks);
92754+ atomic_inc_unchecked(&rdtp->dynticks);
92755 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
92756- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92757+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92758
92759 /*
92760 * It is illegal to enter an extended quiescent state while
92761@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
92762 int user)
92763 {
92764 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
92765- atomic_inc(&rdtp->dynticks);
92766+ atomic_inc_unchecked(&rdtp->dynticks);
92767 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92768 smp_mb__after_atomic(); /* See above. */
92769- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92770+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92771 rcu_cleanup_after_idle(smp_processor_id());
92772 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
92773 if (!user && !is_idle_task(current)) {
92774@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
92775 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
92776
92777 if (rdtp->dynticks_nmi_nesting == 0 &&
92778- (atomic_read(&rdtp->dynticks) & 0x1))
92779+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
92780 return;
92781 rdtp->dynticks_nmi_nesting++;
92782 smp_mb__before_atomic(); /* Force delay from prior write. */
92783- atomic_inc(&rdtp->dynticks);
92784+ atomic_inc_unchecked(&rdtp->dynticks);
92785 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92786 smp_mb__after_atomic(); /* See above. */
92787- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92788+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92789 }
92790
92791 /**
92792@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
92793 return;
92794 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92795 smp_mb__before_atomic(); /* See above. */
92796- atomic_inc(&rdtp->dynticks);
92797+ atomic_inc_unchecked(&rdtp->dynticks);
92798 smp_mb__after_atomic(); /* Force delay to next write. */
92799- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92800+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92801 }
92802
92803 /**
92804@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
92805 */
92806 bool notrace __rcu_is_watching(void)
92807 {
92808- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92809+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92810 }
92811
92812 /**
92813@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
92814 static int dyntick_save_progress_counter(struct rcu_data *rdp,
92815 bool *isidle, unsigned long *maxj)
92816 {
92817- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
92818+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92819 rcu_sysidle_check_cpu(rdp, isidle, maxj);
92820 if ((rdp->dynticks_snap & 0x1) == 0) {
92821 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
92822@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92823 int *rcrmp;
92824 unsigned int snap;
92825
92826- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
92827+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92828 snap = (unsigned int)rdp->dynticks_snap;
92829
92830 /*
92831@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92832 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
92833 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
92834 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
92835- ACCESS_ONCE(rdp->cond_resched_completed) =
92836+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92837 ACCESS_ONCE(rdp->mynode->completed);
92838 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92839- ACCESS_ONCE(*rcrmp) =
92840+ ACCESS_ONCE_RW(*rcrmp) =
92841 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92842 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92843 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92844@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92845 rsp->gp_start = j;
92846 smp_wmb(); /* Record start time before stall time. */
92847 j1 = rcu_jiffies_till_stall_check();
92848- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92849+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92850 rsp->jiffies_resched = j + j1 / 2;
92851 }
92852
92853@@ -1049,7 +1049,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
92854 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92855 return;
92856 }
92857- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92858+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92859 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92860
92861 /*
92862@@ -1126,7 +1126,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92863
92864 raw_spin_lock_irqsave(&rnp->lock, flags);
92865 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92866- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92867+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92868 3 * rcu_jiffies_till_stall_check() + 3;
92869 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92870
92871@@ -1210,7 +1210,7 @@ void rcu_cpu_stall_reset(void)
92872 struct rcu_state *rsp;
92873
92874 for_each_rcu_flavor(rsp)
92875- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92876+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92877 }
92878
92879 /*
92880@@ -1596,7 +1596,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92881 raw_spin_unlock_irq(&rnp->lock);
92882 return 0;
92883 }
92884- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92885+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92886
92887 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92888 /*
92889@@ -1637,9 +1637,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92890 rdp = this_cpu_ptr(rsp->rda);
92891 rcu_preempt_check_blocked_tasks(rnp);
92892 rnp->qsmask = rnp->qsmaskinit;
92893- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92894+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92895 WARN_ON_ONCE(rnp->completed != rsp->completed);
92896- ACCESS_ONCE(rnp->completed) = rsp->completed;
92897+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92898 if (rnp == rdp->mynode)
92899 (void)__note_gp_changes(rsp, rnp, rdp);
92900 rcu_preempt_boost_start_gp(rnp);
92901@@ -1684,7 +1684,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92902 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92903 raw_spin_lock_irq(&rnp->lock);
92904 smp_mb__after_unlock_lock();
92905- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92906+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92907 raw_spin_unlock_irq(&rnp->lock);
92908 }
92909 return fqs_state;
92910@@ -1729,7 +1729,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92911 rcu_for_each_node_breadth_first(rsp, rnp) {
92912 raw_spin_lock_irq(&rnp->lock);
92913 smp_mb__after_unlock_lock();
92914- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92915+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92916 rdp = this_cpu_ptr(rsp->rda);
92917 if (rnp == rdp->mynode)
92918 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92919@@ -1744,14 +1744,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92920 rcu_nocb_gp_set(rnp, nocb);
92921
92922 /* Declare grace period done. */
92923- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92924+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92925 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92926 rsp->fqs_state = RCU_GP_IDLE;
92927 rdp = this_cpu_ptr(rsp->rda);
92928 /* Advance CBs to reduce false positives below. */
92929 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92930 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92931- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92932+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92933 trace_rcu_grace_period(rsp->name,
92934 ACCESS_ONCE(rsp->gpnum),
92935 TPS("newreq"));
92936@@ -1876,7 +1876,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92937 */
92938 return false;
92939 }
92940- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92941+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92942 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92943 TPS("newreq"));
92944
92945@@ -2097,7 +2097,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92946 rsp->qlen += rdp->qlen;
92947 rdp->n_cbs_orphaned += rdp->qlen;
92948 rdp->qlen_lazy = 0;
92949- ACCESS_ONCE(rdp->qlen) = 0;
92950+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92951 }
92952
92953 /*
92954@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92955 }
92956 smp_mb(); /* List handling before counting for rcu_barrier(). */
92957 rdp->qlen_lazy -= count_lazy;
92958- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92959+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92960 rdp->n_cbs_invoked += count;
92961
92962 /* Reinstate batch limit if we have worked down the excess. */
92963@@ -2505,7 +2505,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92964 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92965 return; /* Someone beat us to it. */
92966 }
92967- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92968+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92969 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92970 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
92971 }
92972@@ -2550,7 +2550,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92973 /*
92974 * Do RCU core processing for the current CPU.
92975 */
92976-static void rcu_process_callbacks(struct softirq_action *unused)
92977+static void rcu_process_callbacks(void)
92978 {
92979 struct rcu_state *rsp;
92980
92981@@ -2662,7 +2662,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92982 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92983 if (debug_rcu_head_queue(head)) {
92984 /* Probable double call_rcu(), so leak the callback. */
92985- ACCESS_ONCE(head->func) = rcu_leak_callback;
92986+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92987 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92988 return;
92989 }
92990@@ -2690,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92991 local_irq_restore(flags);
92992 return;
92993 }
92994- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92995+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92996 if (lazy)
92997 rdp->qlen_lazy++;
92998 else
92999@@ -2965,11 +2965,11 @@ void synchronize_sched_expedited(void)
93000 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93001 * course be required on a 64-bit system.
93002 */
93003- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93004+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93005 (ulong)atomic_long_read(&rsp->expedited_done) +
93006 ULONG_MAX / 8)) {
93007 synchronize_sched();
93008- atomic_long_inc(&rsp->expedited_wrap);
93009+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93010 return;
93011 }
93012
93013@@ -2977,7 +2977,7 @@ void synchronize_sched_expedited(void)
93014 * Take a ticket. Note that atomic_inc_return() implies a
93015 * full memory barrier.
93016 */
93017- snap = atomic_long_inc_return(&rsp->expedited_start);
93018+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93019 firstsnap = snap;
93020 get_online_cpus();
93021 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93022@@ -2990,14 +2990,14 @@ void synchronize_sched_expedited(void)
93023 synchronize_sched_expedited_cpu_stop,
93024 NULL) == -EAGAIN) {
93025 put_online_cpus();
93026- atomic_long_inc(&rsp->expedited_tryfail);
93027+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93028
93029 /* Check to see if someone else did our work for us. */
93030 s = atomic_long_read(&rsp->expedited_done);
93031 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93032 /* ensure test happens before caller kfree */
93033 smp_mb__before_atomic(); /* ^^^ */
93034- atomic_long_inc(&rsp->expedited_workdone1);
93035+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93036 return;
93037 }
93038
93039@@ -3006,7 +3006,7 @@ void synchronize_sched_expedited(void)
93040 udelay(trycount * num_online_cpus());
93041 } else {
93042 wait_rcu_gp(call_rcu_sched);
93043- atomic_long_inc(&rsp->expedited_normal);
93044+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93045 return;
93046 }
93047
93048@@ -3015,7 +3015,7 @@ void synchronize_sched_expedited(void)
93049 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93050 /* ensure test happens before caller kfree */
93051 smp_mb__before_atomic(); /* ^^^ */
93052- atomic_long_inc(&rsp->expedited_workdone2);
93053+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93054 return;
93055 }
93056
93057@@ -3027,10 +3027,10 @@ void synchronize_sched_expedited(void)
93058 * period works for us.
93059 */
93060 get_online_cpus();
93061- snap = atomic_long_read(&rsp->expedited_start);
93062+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93063 smp_mb(); /* ensure read is before try_stop_cpus(). */
93064 }
93065- atomic_long_inc(&rsp->expedited_stoppedcpus);
93066+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93067
93068 /*
93069 * Everyone up to our most recent fetch is covered by our grace
93070@@ -3039,16 +3039,16 @@ void synchronize_sched_expedited(void)
93071 * than we did already did their update.
93072 */
93073 do {
93074- atomic_long_inc(&rsp->expedited_done_tries);
93075+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93076 s = atomic_long_read(&rsp->expedited_done);
93077 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93078 /* ensure test happens before caller kfree */
93079 smp_mb__before_atomic(); /* ^^^ */
93080- atomic_long_inc(&rsp->expedited_done_lost);
93081+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93082 break;
93083 }
93084 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93085- atomic_long_inc(&rsp->expedited_done_exit);
93086+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93087
93088 put_online_cpus();
93089 }
93090@@ -3254,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93091 * ACCESS_ONCE() to prevent the compiler from speculating
93092 * the increment to precede the early-exit check.
93093 */
93094- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93095+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93096 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93097 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93098 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93099@@ -3304,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93100
93101 /* Increment ->n_barrier_done to prevent duplicate work. */
93102 smp_mb(); /* Keep increment after above mechanism. */
93103- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93104+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93105 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
93106 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
93107 smp_mb(); /* Keep increment before caller's subsequent code. */
93108@@ -3349,10 +3349,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
93109 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
93110 init_callback_list(rdp);
93111 rdp->qlen_lazy = 0;
93112- ACCESS_ONCE(rdp->qlen) = 0;
93113+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93114 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
93115 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
93116- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
93117+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
93118 rdp->cpu = cpu;
93119 rdp->rsp = rsp;
93120 rcu_boot_init_nocb_percpu_data(rdp);
93121@@ -3385,8 +3385,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
93122 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
93123 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
93124 rcu_sysidle_init_percpu_data(rdp->dynticks);
93125- atomic_set(&rdp->dynticks->dynticks,
93126- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
93127+ atomic_set_unchecked(&rdp->dynticks->dynticks,
93128+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
93129 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
93130
93131 /* Add CPU to rcu_node bitmasks. */
93132diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
93133index 6a86eb7..022b506 100644
93134--- a/kernel/rcu/tree.h
93135+++ b/kernel/rcu/tree.h
93136@@ -87,11 +87,11 @@ struct rcu_dynticks {
93137 long long dynticks_nesting; /* Track irq/process nesting level. */
93138 /* Process level is worth LLONG_MAX/2. */
93139 int dynticks_nmi_nesting; /* Track NMI nesting level. */
93140- atomic_t dynticks; /* Even value for idle, else odd. */
93141+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
93142 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
93143 long long dynticks_idle_nesting;
93144 /* irq/process nesting level from idle. */
93145- atomic_t dynticks_idle; /* Even value for idle, else odd. */
93146+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
93147 /* "Idle" excludes userspace execution. */
93148 unsigned long dynticks_idle_jiffies;
93149 /* End of last non-NMI non-idle period. */
93150@@ -461,17 +461,17 @@ struct rcu_state {
93151 /* _rcu_barrier(). */
93152 /* End of fields guarded by barrier_mutex. */
93153
93154- atomic_long_t expedited_start; /* Starting ticket. */
93155- atomic_long_t expedited_done; /* Done ticket. */
93156- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
93157- atomic_long_t expedited_tryfail; /* # acquisition failures. */
93158- atomic_long_t expedited_workdone1; /* # done by others #1. */
93159- atomic_long_t expedited_workdone2; /* # done by others #2. */
93160- atomic_long_t expedited_normal; /* # fallbacks to normal. */
93161- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
93162- atomic_long_t expedited_done_tries; /* # tries to update _done. */
93163- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
93164- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
93165+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
93166+ atomic_long_t expedited_done; /* Done ticket. */
93167+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
93168+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
93169+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
93170+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
93171+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
93172+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
93173+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
93174+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
93175+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
93176
93177 unsigned long jiffies_force_qs; /* Time at which to invoke */
93178 /* force_quiescent_state(). */
93179diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
93180index a7997e2..9787c9e 100644
93181--- a/kernel/rcu/tree_plugin.h
93182+++ b/kernel/rcu/tree_plugin.h
93183@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
93184 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
93185 {
93186 return !rcu_preempted_readers_exp(rnp) &&
93187- ACCESS_ONCE(rnp->expmask) == 0;
93188+ ACCESS_ONCE_RW(rnp->expmask) == 0;
93189 }
93190
93191 /*
93192@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
93193
93194 /* Clean up and exit. */
93195 smp_mb(); /* ensure expedited GP seen before counter increment. */
93196- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
93197+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
93198 unlock_mb_ret:
93199 mutex_unlock(&sync_rcu_preempt_exp_mutex);
93200 mb_ret:
93201@@ -1452,7 +1452,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
93202 free_cpumask_var(cm);
93203 }
93204
93205-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
93206+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
93207 .store = &rcu_cpu_kthread_task,
93208 .thread_should_run = rcu_cpu_kthread_should_run,
93209 .thread_fn = rcu_cpu_kthread,
93210@@ -1932,7 +1932,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
93211 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
93212 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
93213 cpu, ticks_value, ticks_title,
93214- atomic_read(&rdtp->dynticks) & 0xfff,
93215+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
93216 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
93217 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
93218 fast_no_hz);
93219@@ -2076,7 +2076,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
93220 return;
93221 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
93222 /* Prior xchg orders against prior callback enqueue. */
93223- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
93224+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
93225 wake_up(&rdp_leader->nocb_wq);
93226 }
93227 }
93228@@ -2101,7 +2101,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
93229
93230 /* Enqueue the callback on the nocb list and update counts. */
93231 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
93232- ACCESS_ONCE(*old_rhpp) = rhp;
93233+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
93234 atomic_long_add(rhcount, &rdp->nocb_q_count);
93235 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
93236
93237@@ -2272,7 +2272,7 @@ wait_again:
93238 continue; /* No CBs here, try next follower. */
93239
93240 /* Move callbacks to wait-for-GP list, which is empty. */
93241- ACCESS_ONCE(rdp->nocb_head) = NULL;
93242+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
93243 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
93244 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
93245 rdp->nocb_gp_count_lazy =
93246@@ -2398,7 +2398,7 @@ static int rcu_nocb_kthread(void *arg)
93247 list = ACCESS_ONCE(rdp->nocb_follower_head);
93248 BUG_ON(!list);
93249 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
93250- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
93251+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
93252 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
93253 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
93254 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
93255@@ -2428,8 +2428,8 @@ static int rcu_nocb_kthread(void *arg)
93256 list = next;
93257 }
93258 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
93259- ACCESS_ONCE(rdp->nocb_p_count) -= c;
93260- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
93261+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
93262+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
93263 rdp->n_nocbs_invoked += c;
93264 }
93265 return 0;
93266@@ -2446,7 +2446,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
93267 {
93268 if (!rcu_nocb_need_deferred_wakeup(rdp))
93269 return;
93270- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
93271+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
93272 wake_nocb_leader(rdp, false);
93273 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
93274 }
93275@@ -2510,7 +2510,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
93276 t = kthread_run(rcu_nocb_kthread, rdp,
93277 "rcuo%c/%d", rsp->abbr, cpu);
93278 BUG_ON(IS_ERR(t));
93279- ACCESS_ONCE(rdp->nocb_kthread) = t;
93280+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
93281 }
93282 }
93283
93284@@ -2641,11 +2641,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
93285
93286 /* Record start of fully idle period. */
93287 j = jiffies;
93288- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
93289+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
93290 smp_mb__before_atomic();
93291- atomic_inc(&rdtp->dynticks_idle);
93292+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93293 smp_mb__after_atomic();
93294- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
93295+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
93296 }
93297
93298 /*
93299@@ -2710,9 +2710,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
93300
93301 /* Record end of idle period. */
93302 smp_mb__before_atomic();
93303- atomic_inc(&rdtp->dynticks_idle);
93304+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93305 smp_mb__after_atomic();
93306- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
93307+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
93308
93309 /*
93310 * If we are the timekeeping CPU, we are permitted to be non-idle
93311@@ -2753,7 +2753,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
93312 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
93313
93314 /* Pick up current idle and NMI-nesting counter and check. */
93315- cur = atomic_read(&rdtp->dynticks_idle);
93316+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
93317 if (cur & 0x1) {
93318 *isidle = false; /* We are not idle! */
93319 return;
93320@@ -2802,7 +2802,7 @@ static void rcu_sysidle(unsigned long j)
93321 case RCU_SYSIDLE_NOT:
93322
93323 /* First time all are idle, so note a short idle period. */
93324- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93325+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93326 break;
93327
93328 case RCU_SYSIDLE_SHORT:
93329@@ -2840,7 +2840,7 @@ static void rcu_sysidle_cancel(void)
93330 {
93331 smp_mb();
93332 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
93333- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
93334+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
93335 }
93336
93337 /*
93338@@ -2888,7 +2888,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
93339 smp_mb(); /* grace period precedes setting inuse. */
93340
93341 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
93342- ACCESS_ONCE(rshp->inuse) = 0;
93343+ ACCESS_ONCE_RW(rshp->inuse) = 0;
93344 }
93345
93346 /*
93347diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
93348index 5cdc62e..cc52e88 100644
93349--- a/kernel/rcu/tree_trace.c
93350+++ b/kernel/rcu/tree_trace.c
93351@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
93352 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
93353 rdp->passed_quiesce, rdp->qs_pending);
93354 seq_printf(m, " dt=%d/%llx/%d df=%lu",
93355- atomic_read(&rdp->dynticks->dynticks),
93356+ atomic_read_unchecked(&rdp->dynticks->dynticks),
93357 rdp->dynticks->dynticks_nesting,
93358 rdp->dynticks->dynticks_nmi_nesting,
93359 rdp->dynticks_fqs);
93360@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
93361 struct rcu_state *rsp = (struct rcu_state *)m->private;
93362
93363 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
93364- atomic_long_read(&rsp->expedited_start),
93365+ atomic_long_read_unchecked(&rsp->expedited_start),
93366 atomic_long_read(&rsp->expedited_done),
93367- atomic_long_read(&rsp->expedited_wrap),
93368- atomic_long_read(&rsp->expedited_tryfail),
93369- atomic_long_read(&rsp->expedited_workdone1),
93370- atomic_long_read(&rsp->expedited_workdone2),
93371- atomic_long_read(&rsp->expedited_normal),
93372- atomic_long_read(&rsp->expedited_stoppedcpus),
93373- atomic_long_read(&rsp->expedited_done_tries),
93374- atomic_long_read(&rsp->expedited_done_lost),
93375- atomic_long_read(&rsp->expedited_done_exit));
93376+ atomic_long_read_unchecked(&rsp->expedited_wrap),
93377+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
93378+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
93379+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
93380+ atomic_long_read_unchecked(&rsp->expedited_normal),
93381+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
93382+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
93383+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
93384+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
93385 return 0;
93386 }
93387
93388diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
93389index 4056d79..c11741a 100644
93390--- a/kernel/rcu/update.c
93391+++ b/kernel/rcu/update.c
93392@@ -308,10 +308,10 @@ int rcu_jiffies_till_stall_check(void)
93393 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
93394 */
93395 if (till_stall_check < 3) {
93396- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
93397+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
93398 till_stall_check = 3;
93399 } else if (till_stall_check > 300) {
93400- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
93401+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
93402 till_stall_check = 300;
93403 }
93404 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
93405diff --git a/kernel/resource.c b/kernel/resource.c
93406index 60c5a38..ed77193 100644
93407--- a/kernel/resource.c
93408+++ b/kernel/resource.c
93409@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
93410
93411 static int __init ioresources_init(void)
93412 {
93413+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93414+#ifdef CONFIG_GRKERNSEC_PROC_USER
93415+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93416+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93417+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93418+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93419+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93420+#endif
93421+#else
93422 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93423 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93424+#endif
93425 return 0;
93426 }
93427 __initcall(ioresources_init);
93428diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
93429index e73efba..c9bfbd4 100644
93430--- a/kernel/sched/auto_group.c
93431+++ b/kernel/sched/auto_group.c
93432@@ -11,7 +11,7 @@
93433
93434 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
93435 static struct autogroup autogroup_default;
93436-static atomic_t autogroup_seq_nr;
93437+static atomic_unchecked_t autogroup_seq_nr;
93438
93439 void __init autogroup_init(struct task_struct *init_task)
93440 {
93441@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
93442
93443 kref_init(&ag->kref);
93444 init_rwsem(&ag->lock);
93445- ag->id = atomic_inc_return(&autogroup_seq_nr);
93446+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
93447 ag->tg = tg;
93448 #ifdef CONFIG_RT_GROUP_SCHED
93449 /*
93450diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
93451index a63f4dc..349bbb0 100644
93452--- a/kernel/sched/completion.c
93453+++ b/kernel/sched/completion.c
93454@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
93455 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93456 * or number of jiffies left till timeout) if completed.
93457 */
93458-long __sched
93459+long __sched __intentional_overflow(-1)
93460 wait_for_completion_interruptible_timeout(struct completion *x,
93461 unsigned long timeout)
93462 {
93463@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
93464 *
93465 * Return: -ERESTARTSYS if interrupted, 0 if completed.
93466 */
93467-int __sched wait_for_completion_killable(struct completion *x)
93468+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
93469 {
93470 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
93471 if (t == -ERESTARTSYS)
93472@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
93473 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93474 * or number of jiffies left till timeout) if completed.
93475 */
93476-long __sched
93477+long __sched __intentional_overflow(-1)
93478 wait_for_completion_killable_timeout(struct completion *x,
93479 unsigned long timeout)
93480 {
93481diff --git a/kernel/sched/core.c b/kernel/sched/core.c
93482index ec1a286..6b516b8 100644
93483--- a/kernel/sched/core.c
93484+++ b/kernel/sched/core.c
93485@@ -1857,7 +1857,7 @@ void set_numabalancing_state(bool enabled)
93486 int sysctl_numa_balancing(struct ctl_table *table, int write,
93487 void __user *buffer, size_t *lenp, loff_t *ppos)
93488 {
93489- struct ctl_table t;
93490+ ctl_table_no_const t;
93491 int err;
93492 int state = numabalancing_enabled;
93493
93494@@ -2320,8 +2320,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
93495 next->active_mm = oldmm;
93496 atomic_inc(&oldmm->mm_count);
93497 enter_lazy_tlb(oldmm, next);
93498- } else
93499+ } else {
93500 switch_mm(oldmm, mm, next);
93501+ populate_stack();
93502+ }
93503
93504 if (!prev->mm) {
93505 prev->active_mm = NULL;
93506@@ -3103,6 +3105,8 @@ int can_nice(const struct task_struct *p, const int nice)
93507 /* convert nice value [19,-20] to rlimit style value [1,40] */
93508 int nice_rlim = nice_to_rlimit(nice);
93509
93510+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
93511+
93512 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
93513 capable(CAP_SYS_NICE));
93514 }
93515@@ -3129,7 +3133,8 @@ SYSCALL_DEFINE1(nice, int, increment)
93516 nice = task_nice(current) + increment;
93517
93518 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
93519- if (increment < 0 && !can_nice(current, nice))
93520+ if (increment < 0 && (!can_nice(current, nice) ||
93521+ gr_handle_chroot_nice()))
93522 return -EPERM;
93523
93524 retval = security_task_setnice(current, nice);
93525@@ -3408,6 +3413,7 @@ recheck:
93526 if (policy != p->policy && !rlim_rtprio)
93527 return -EPERM;
93528
93529+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
93530 /* can't increase priority */
93531 if (attr->sched_priority > p->rt_priority &&
93532 attr->sched_priority > rlim_rtprio)
93533@@ -4797,6 +4803,7 @@ void idle_task_exit(void)
93534
93535 if (mm != &init_mm) {
93536 switch_mm(mm, &init_mm, current);
93537+ populate_stack();
93538 finish_arch_post_lock_switch();
93539 }
93540 mmdrop(mm);
93541@@ -4892,7 +4899,7 @@ static void migrate_tasks(unsigned int dead_cpu)
93542
93543 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
93544
93545-static struct ctl_table sd_ctl_dir[] = {
93546+static ctl_table_no_const sd_ctl_dir[] __read_only = {
93547 {
93548 .procname = "sched_domain",
93549 .mode = 0555,
93550@@ -4909,17 +4916,17 @@ static struct ctl_table sd_ctl_root[] = {
93551 {}
93552 };
93553
93554-static struct ctl_table *sd_alloc_ctl_entry(int n)
93555+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
93556 {
93557- struct ctl_table *entry =
93558+ ctl_table_no_const *entry =
93559 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
93560
93561 return entry;
93562 }
93563
93564-static void sd_free_ctl_entry(struct ctl_table **tablep)
93565+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
93566 {
93567- struct ctl_table *entry;
93568+ ctl_table_no_const *entry;
93569
93570 /*
93571 * In the intermediate directories, both the child directory and
93572@@ -4927,22 +4934,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
93573 * will always be set. In the lowest directory the names are
93574 * static strings and all have proc handlers.
93575 */
93576- for (entry = *tablep; entry->mode; entry++) {
93577- if (entry->child)
93578- sd_free_ctl_entry(&entry->child);
93579+ for (entry = tablep; entry->mode; entry++) {
93580+ if (entry->child) {
93581+ sd_free_ctl_entry(entry->child);
93582+ pax_open_kernel();
93583+ entry->child = NULL;
93584+ pax_close_kernel();
93585+ }
93586 if (entry->proc_handler == NULL)
93587 kfree(entry->procname);
93588 }
93589
93590- kfree(*tablep);
93591- *tablep = NULL;
93592+ kfree(tablep);
93593 }
93594
93595 static int min_load_idx = 0;
93596 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
93597
93598 static void
93599-set_table_entry(struct ctl_table *entry,
93600+set_table_entry(ctl_table_no_const *entry,
93601 const char *procname, void *data, int maxlen,
93602 umode_t mode, proc_handler *proc_handler,
93603 bool load_idx)
93604@@ -4962,7 +4972,7 @@ set_table_entry(struct ctl_table *entry,
93605 static struct ctl_table *
93606 sd_alloc_ctl_domain_table(struct sched_domain *sd)
93607 {
93608- struct ctl_table *table = sd_alloc_ctl_entry(14);
93609+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
93610
93611 if (table == NULL)
93612 return NULL;
93613@@ -5000,9 +5010,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
93614 return table;
93615 }
93616
93617-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
93618+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
93619 {
93620- struct ctl_table *entry, *table;
93621+ ctl_table_no_const *entry, *table;
93622 struct sched_domain *sd;
93623 int domain_num = 0, i;
93624 char buf[32];
93625@@ -5029,11 +5039,13 @@ static struct ctl_table_header *sd_sysctl_header;
93626 static void register_sched_domain_sysctl(void)
93627 {
93628 int i, cpu_num = num_possible_cpus();
93629- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
93630+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
93631 char buf[32];
93632
93633 WARN_ON(sd_ctl_dir[0].child);
93634+ pax_open_kernel();
93635 sd_ctl_dir[0].child = entry;
93636+ pax_close_kernel();
93637
93638 if (entry == NULL)
93639 return;
93640@@ -5056,8 +5068,12 @@ static void unregister_sched_domain_sysctl(void)
93641 if (sd_sysctl_header)
93642 unregister_sysctl_table(sd_sysctl_header);
93643 sd_sysctl_header = NULL;
93644- if (sd_ctl_dir[0].child)
93645- sd_free_ctl_entry(&sd_ctl_dir[0].child);
93646+ if (sd_ctl_dir[0].child) {
93647+ sd_free_ctl_entry(sd_ctl_dir[0].child);
93648+ pax_open_kernel();
93649+ sd_ctl_dir[0].child = NULL;
93650+ pax_close_kernel();
93651+ }
93652 }
93653 #else
93654 static void register_sched_domain_sysctl(void)
93655diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
93656index bfa3c86..e58767c 100644
93657--- a/kernel/sched/fair.c
93658+++ b/kernel/sched/fair.c
93659@@ -1873,7 +1873,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
93660
93661 static void reset_ptenuma_scan(struct task_struct *p)
93662 {
93663- ACCESS_ONCE(p->mm->numa_scan_seq)++;
93664+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
93665 p->mm->numa_scan_offset = 0;
93666 }
93667
93668@@ -7339,7 +7339,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
93669 * run_rebalance_domains is triggered when needed from the scheduler tick.
93670 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
93671 */
93672-static void run_rebalance_domains(struct softirq_action *h)
93673+static __latent_entropy void run_rebalance_domains(void)
93674 {
93675 struct rq *this_rq = this_rq();
93676 enum cpu_idle_type idle = this_rq->idle_balance ?
93677diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
93678index 579712f..a338a9d 100644
93679--- a/kernel/sched/sched.h
93680+++ b/kernel/sched/sched.h
93681@@ -1146,7 +1146,7 @@ struct sched_class {
93682 #ifdef CONFIG_FAIR_GROUP_SCHED
93683 void (*task_move_group) (struct task_struct *p, int on_rq);
93684 #endif
93685-};
93686+} __do_const;
93687
93688 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
93689 {
93690diff --git a/kernel/seccomp.c b/kernel/seccomp.c
93691index 44eb005..84922be 100644
93692--- a/kernel/seccomp.c
93693+++ b/kernel/seccomp.c
93694@@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
93695 if (!filter)
93696 goto free_prog;
93697
93698- filter->prog = kzalloc(bpf_prog_size(new_len),
93699- GFP_KERNEL|__GFP_NOWARN);
93700+ filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
93701 if (!filter->prog)
93702 goto free_filter;
93703
93704 ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
93705 if (ret)
93706 goto free_filter_prog;
93707- kfree(fp);
93708
93709+ kfree(fp);
93710 atomic_set(&filter->usage, 1);
93711 filter->prog->len = new_len;
93712
93713@@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
93714 return filter;
93715
93716 free_filter_prog:
93717- kfree(filter->prog);
93718+ __bpf_prog_free(filter->prog);
93719 free_filter:
93720 kfree(filter);
93721 free_prog:
93722diff --git a/kernel/signal.c b/kernel/signal.c
93723index 8f0876f..1153a5a 100644
93724--- a/kernel/signal.c
93725+++ b/kernel/signal.c
93726@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
93727
93728 int print_fatal_signals __read_mostly;
93729
93730-static void __user *sig_handler(struct task_struct *t, int sig)
93731+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93732 {
93733 return t->sighand->action[sig - 1].sa.sa_handler;
93734 }
93735
93736-static int sig_handler_ignored(void __user *handler, int sig)
93737+static int sig_handler_ignored(__sighandler_t handler, int sig)
93738 {
93739 /* Is it explicitly or implicitly ignored? */
93740 return handler == SIG_IGN ||
93741@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93742
93743 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
93744 {
93745- void __user *handler;
93746+ __sighandler_t handler;
93747
93748 handler = sig_handler(t, sig);
93749
93750@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
93751 atomic_inc(&user->sigpending);
93752 rcu_read_unlock();
93753
93754+ if (!override_rlimit)
93755+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93756+
93757 if (override_rlimit ||
93758 atomic_read(&user->sigpending) <=
93759 task_rlimit(t, RLIMIT_SIGPENDING)) {
93760@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93761
93762 int unhandled_signal(struct task_struct *tsk, int sig)
93763 {
93764- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93765+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93766 if (is_global_init(tsk))
93767 return 1;
93768 if (handler != SIG_IGN && handler != SIG_DFL)
93769@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93770 }
93771 }
93772
93773+ /* allow glibc communication via tgkill to other threads in our
93774+ thread group */
93775+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93776+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93777+ && gr_handle_signal(t, sig))
93778+ return -EPERM;
93779+
93780 return security_task_kill(t, info, sig, 0);
93781 }
93782
93783@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93784 return send_signal(sig, info, p, 1);
93785 }
93786
93787-static int
93788+int
93789 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93790 {
93791 return send_signal(sig, info, t, 0);
93792@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93793 unsigned long int flags;
93794 int ret, blocked, ignored;
93795 struct k_sigaction *action;
93796+ int is_unhandled = 0;
93797
93798 spin_lock_irqsave(&t->sighand->siglock, flags);
93799 action = &t->sighand->action[sig-1];
93800@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93801 }
93802 if (action->sa.sa_handler == SIG_DFL)
93803 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93804+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93805+ is_unhandled = 1;
93806 ret = specific_send_sig_info(sig, info, t);
93807 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93808
93809+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
93810+ normal operation */
93811+ if (is_unhandled) {
93812+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93813+ gr_handle_crash(t, sig);
93814+ }
93815+
93816 return ret;
93817 }
93818
93819@@ -1300,8 +1320,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93820 ret = check_kill_permission(sig, info, p);
93821 rcu_read_unlock();
93822
93823- if (!ret && sig)
93824+ if (!ret && sig) {
93825 ret = do_send_sig_info(sig, info, p, true);
93826+ if (!ret)
93827+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93828+ }
93829
93830 return ret;
93831 }
93832@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93833 int error = -ESRCH;
93834
93835 rcu_read_lock();
93836- p = find_task_by_vpid(pid);
93837+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93838+ /* allow glibc communication via tgkill to other threads in our
93839+ thread group */
93840+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93841+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93842+ p = find_task_by_vpid_unrestricted(pid);
93843+ else
93844+#endif
93845+ p = find_task_by_vpid(pid);
93846 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93847 error = check_kill_permission(sig, info, p);
93848 /*
93849@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93850 }
93851 seg = get_fs();
93852 set_fs(KERNEL_DS);
93853- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93854- (stack_t __force __user *) &uoss,
93855+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93856+ (stack_t __force_user *) &uoss,
93857 compat_user_stack_pointer());
93858 set_fs(seg);
93859 if (ret >= 0 && uoss_ptr) {
93860diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93861index eb89e18..a4e6792 100644
93862--- a/kernel/smpboot.c
93863+++ b/kernel/smpboot.c
93864@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93865 }
93866 smpboot_unpark_thread(plug_thread, cpu);
93867 }
93868- list_add(&plug_thread->list, &hotplug_threads);
93869+ pax_list_add(&plug_thread->list, &hotplug_threads);
93870 out:
93871 mutex_unlock(&smpboot_threads_lock);
93872 return ret;
93873@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93874 {
93875 get_online_cpus();
93876 mutex_lock(&smpboot_threads_lock);
93877- list_del(&plug_thread->list);
93878+ pax_list_del(&plug_thread->list);
93879 smpboot_destroy_threads(plug_thread);
93880 mutex_unlock(&smpboot_threads_lock);
93881 put_online_cpus();
93882diff --git a/kernel/softirq.c b/kernel/softirq.c
93883index 5918d22..e95d1926 100644
93884--- a/kernel/softirq.c
93885+++ b/kernel/softirq.c
93886@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93887 EXPORT_SYMBOL(irq_stat);
93888 #endif
93889
93890-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93891+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93892
93893 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93894
93895@@ -266,7 +266,7 @@ restart:
93896 kstat_incr_softirqs_this_cpu(vec_nr);
93897
93898 trace_softirq_entry(vec_nr);
93899- h->action(h);
93900+ h->action();
93901 trace_softirq_exit(vec_nr);
93902 if (unlikely(prev_count != preempt_count())) {
93903 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93904@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93905 or_softirq_pending(1UL << nr);
93906 }
93907
93908-void open_softirq(int nr, void (*action)(struct softirq_action *))
93909+void __init open_softirq(int nr, void (*action)(void))
93910 {
93911 softirq_vec[nr].action = action;
93912 }
93913@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93914 }
93915 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93916
93917-static void tasklet_action(struct softirq_action *a)
93918+static void tasklet_action(void)
93919 {
93920 struct tasklet_struct *list;
93921
93922@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93923 }
93924 }
93925
93926-static void tasklet_hi_action(struct softirq_action *a)
93927+static __latent_entropy void tasklet_hi_action(void)
93928 {
93929 struct tasklet_struct *list;
93930
93931@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
93932 .notifier_call = cpu_callback
93933 };
93934
93935-static struct smp_hotplug_thread softirq_threads = {
93936+static struct smp_hotplug_thread softirq_threads __read_only = {
93937 .store = &ksoftirqd,
93938 .thread_should_run = ksoftirqd_should_run,
93939 .thread_fn = run_ksoftirqd,
93940diff --git a/kernel/sys.c b/kernel/sys.c
93941index ce81291..df2ca85 100644
93942--- a/kernel/sys.c
93943+++ b/kernel/sys.c
93944@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93945 error = -EACCES;
93946 goto out;
93947 }
93948+
93949+ if (gr_handle_chroot_setpriority(p, niceval)) {
93950+ error = -EACCES;
93951+ goto out;
93952+ }
93953+
93954 no_nice = security_task_setnice(p, niceval);
93955 if (no_nice) {
93956 error = no_nice;
93957@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93958 goto error;
93959 }
93960
93961+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93962+ goto error;
93963+
93964+ if (!gid_eq(new->gid, old->gid)) {
93965+ /* make sure we generate a learn log for what will
93966+ end up being a role transition after a full-learning
93967+ policy is generated
93968+ CAP_SETGID is required to perform a transition
93969+ we may not log a CAP_SETGID check above, e.g.
93970+ in the case where new rgid = old egid
93971+ */
93972+ gr_learn_cap(current, new, CAP_SETGID);
93973+ }
93974+
93975 if (rgid != (gid_t) -1 ||
93976 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93977 new->sgid = new->egid;
93978@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93979 old = current_cred();
93980
93981 retval = -EPERM;
93982+
93983+ if (gr_check_group_change(kgid, kgid, kgid))
93984+ goto error;
93985+
93986 if (ns_capable(old->user_ns, CAP_SETGID))
93987 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93988 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93989@@ -403,7 +427,7 @@ error:
93990 /*
93991 * change the user struct in a credentials set to match the new UID
93992 */
93993-static int set_user(struct cred *new)
93994+int set_user(struct cred *new)
93995 {
93996 struct user_struct *new_user;
93997
93998@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93999 goto error;
94000 }
94001
94002+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
94003+ goto error;
94004+
94005 if (!uid_eq(new->uid, old->uid)) {
94006+ /* make sure we generate a learn log for what will
94007+ end up being a role transition after a full-learning
94008+ policy is generated
94009+ CAP_SETUID is required to perform a transition
94010+ we may not log a CAP_SETUID check above, e.g.
94011+ in the case where new ruid = old euid
94012+ */
94013+ gr_learn_cap(current, new, CAP_SETUID);
94014 retval = set_user(new);
94015 if (retval < 0)
94016 goto error;
94017@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94018 old = current_cred();
94019
94020 retval = -EPERM;
94021+
94022+ if (gr_check_crash_uid(kuid))
94023+ goto error;
94024+ if (gr_check_user_change(kuid, kuid, kuid))
94025+ goto error;
94026+
94027 if (ns_capable(old->user_ns, CAP_SETUID)) {
94028 new->suid = new->uid = kuid;
94029 if (!uid_eq(kuid, old->uid)) {
94030@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94031 goto error;
94032 }
94033
94034+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
94035+ goto error;
94036+
94037 if (ruid != (uid_t) -1) {
94038 new->uid = kruid;
94039 if (!uid_eq(kruid, old->uid)) {
94040@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94041 goto error;
94042 }
94043
94044+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
94045+ goto error;
94046+
94047 if (rgid != (gid_t) -1)
94048 new->gid = krgid;
94049 if (egid != (gid_t) -1)
94050@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94051 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
94052 ns_capable(old->user_ns, CAP_SETUID)) {
94053 if (!uid_eq(kuid, old->fsuid)) {
94054+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
94055+ goto error;
94056+
94057 new->fsuid = kuid;
94058 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
94059 goto change_okay;
94060 }
94061 }
94062
94063+error:
94064 abort_creds(new);
94065 return old_fsuid;
94066
94067@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94068 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
94069 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
94070 ns_capable(old->user_ns, CAP_SETGID)) {
94071+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
94072+ goto error;
94073+
94074 if (!gid_eq(kgid, old->fsgid)) {
94075 new->fsgid = kgid;
94076 goto change_okay;
94077 }
94078 }
94079
94080+error:
94081 abort_creds(new);
94082 return old_fsgid;
94083
94084@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
94085 return -EFAULT;
94086
94087 down_read(&uts_sem);
94088- error = __copy_to_user(&name->sysname, &utsname()->sysname,
94089+ error = __copy_to_user(name->sysname, &utsname()->sysname,
94090 __OLD_UTS_LEN);
94091 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
94092- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
94093+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
94094 __OLD_UTS_LEN);
94095 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
94096- error |= __copy_to_user(&name->release, &utsname()->release,
94097+ error |= __copy_to_user(name->release, &utsname()->release,
94098 __OLD_UTS_LEN);
94099 error |= __put_user(0, name->release + __OLD_UTS_LEN);
94100- error |= __copy_to_user(&name->version, &utsname()->version,
94101+ error |= __copy_to_user(name->version, &utsname()->version,
94102 __OLD_UTS_LEN);
94103 error |= __put_user(0, name->version + __OLD_UTS_LEN);
94104- error |= __copy_to_user(&name->machine, &utsname()->machine,
94105+ error |= __copy_to_user(name->machine, &utsname()->machine,
94106 __OLD_UTS_LEN);
94107 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
94108 up_read(&uts_sem);
94109@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
94110 */
94111 new_rlim->rlim_cur = 1;
94112 }
94113+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
94114+ is changed to a lower value. Since tasks can be created by the same
94115+ user in between this limit change and an execve by this task, force
94116+ a recheck only for this task by setting PF_NPROC_EXCEEDED
94117+ */
94118+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
94119+ tsk->flags |= PF_NPROC_EXCEEDED;
94120 }
94121 if (!retval) {
94122 if (old_rlim)
94123diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94124index 75875a7..cd8e838 100644
94125--- a/kernel/sysctl.c
94126+++ b/kernel/sysctl.c
94127@@ -94,7 +94,6 @@
94128
94129
94130 #if defined(CONFIG_SYSCTL)
94131-
94132 /* External variables not in a header file. */
94133 extern int max_threads;
94134 extern int suid_dumpable;
94135@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
94136
94137 /* Constants used for minimum and maximum */
94138 #ifdef CONFIG_LOCKUP_DETECTOR
94139-static int sixty = 60;
94140+static int sixty __read_only = 60;
94141 #endif
94142
94143-static int __maybe_unused neg_one = -1;
94144+static int __maybe_unused neg_one __read_only = -1;
94145
94146-static int zero;
94147-static int __maybe_unused one = 1;
94148-static int __maybe_unused two = 2;
94149-static int __maybe_unused four = 4;
94150-static unsigned long one_ul = 1;
94151-static int one_hundred = 100;
94152+static int zero __read_only = 0;
94153+static int __maybe_unused one __read_only = 1;
94154+static int __maybe_unused two __read_only = 2;
94155+static int __maybe_unused three __read_only = 3;
94156+static int __maybe_unused four __read_only = 4;
94157+static unsigned long one_ul __read_only = 1;
94158+static int one_hundred __read_only = 100;
94159 #ifdef CONFIG_PRINTK
94160-static int ten_thousand = 10000;
94161+static int ten_thousand __read_only = 10000;
94162 #endif
94163
94164 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
94165@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
94166 void __user *buffer, size_t *lenp, loff_t *ppos);
94167 #endif
94168
94169-#ifdef CONFIG_PRINTK
94170 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94171 void __user *buffer, size_t *lenp, loff_t *ppos);
94172-#endif
94173
94174 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
94175 void __user *buffer, size_t *lenp, loff_t *ppos);
94176@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
94177
94178 #endif
94179
94180+extern struct ctl_table grsecurity_table[];
94181+
94182 static struct ctl_table kern_table[];
94183 static struct ctl_table vm_table[];
94184 static struct ctl_table fs_table[];
94185@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
94186 int sysctl_legacy_va_layout;
94187 #endif
94188
94189+#ifdef CONFIG_PAX_SOFTMODE
94190+static struct ctl_table pax_table[] = {
94191+ {
94192+ .procname = "softmode",
94193+ .data = &pax_softmode,
94194+ .maxlen = sizeof(unsigned int),
94195+ .mode = 0600,
94196+ .proc_handler = &proc_dointvec,
94197+ },
94198+
94199+ { }
94200+};
94201+#endif
94202+
94203 /* The default sysctl tables: */
94204
94205 static struct ctl_table sysctl_base_table[] = {
94206@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
94207 #endif
94208
94209 static struct ctl_table kern_table[] = {
94210+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94211+ {
94212+ .procname = "grsecurity",
94213+ .mode = 0500,
94214+ .child = grsecurity_table,
94215+ },
94216+#endif
94217+
94218+#ifdef CONFIG_PAX_SOFTMODE
94219+ {
94220+ .procname = "pax",
94221+ .mode = 0500,
94222+ .child = pax_table,
94223+ },
94224+#endif
94225+
94226 {
94227 .procname = "sched_child_runs_first",
94228 .data = &sysctl_sched_child_runs_first,
94229@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
94230 .data = &modprobe_path,
94231 .maxlen = KMOD_PATH_LEN,
94232 .mode = 0644,
94233- .proc_handler = proc_dostring,
94234+ .proc_handler = proc_dostring_modpriv,
94235 },
94236 {
94237 .procname = "modules_disabled",
94238@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
94239 .extra1 = &zero,
94240 .extra2 = &one,
94241 },
94242+#endif
94243 {
94244 .procname = "kptr_restrict",
94245 .data = &kptr_restrict,
94246 .maxlen = sizeof(int),
94247 .mode = 0644,
94248 .proc_handler = proc_dointvec_minmax_sysadmin,
94249+#ifdef CONFIG_GRKERNSEC_HIDESYM
94250+ .extra1 = &two,
94251+#else
94252 .extra1 = &zero,
94253+#endif
94254 .extra2 = &two,
94255 },
94256-#endif
94257 {
94258 .procname = "ngroups_max",
94259 .data = &ngroups_max,
94260@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
94261 */
94262 {
94263 .procname = "perf_event_paranoid",
94264- .data = &sysctl_perf_event_paranoid,
94265- .maxlen = sizeof(sysctl_perf_event_paranoid),
94266+ .data = &sysctl_perf_event_legitimately_concerned,
94267+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
94268 .mode = 0644,
94269- .proc_handler = proc_dointvec,
94270+ /* go ahead, be a hero */
94271+ .proc_handler = proc_dointvec_minmax_sysadmin,
94272+ .extra1 = &neg_one,
94273+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
94274+ .extra2 = &three,
94275+#else
94276+ .extra2 = &two,
94277+#endif
94278 },
94279 {
94280 .procname = "perf_event_mlock_kb",
94281@@ -1335,6 +1376,13 @@ static struct ctl_table vm_table[] = {
94282 .proc_handler = proc_dointvec_minmax,
94283 .extra1 = &zero,
94284 },
94285+ {
94286+ .procname = "heap_stack_gap",
94287+ .data = &sysctl_heap_stack_gap,
94288+ .maxlen = sizeof(sysctl_heap_stack_gap),
94289+ .mode = 0644,
94290+ .proc_handler = proc_doulongvec_minmax,
94291+ },
94292 #else
94293 {
94294 .procname = "nr_trim_pages",
94295@@ -1824,6 +1872,16 @@ int proc_dostring(struct ctl_table *table, int write,
94296 (char __user *)buffer, lenp, ppos);
94297 }
94298
94299+int proc_dostring_modpriv(struct ctl_table *table, int write,
94300+ void __user *buffer, size_t *lenp, loff_t *ppos)
94301+{
94302+ if (write && !capable(CAP_SYS_MODULE))
94303+ return -EPERM;
94304+
94305+ return _proc_do_string(table->data, table->maxlen, write,
94306+ buffer, lenp, ppos);
94307+}
94308+
94309 static size_t proc_skip_spaces(char **buf)
94310 {
94311 size_t ret;
94312@@ -1929,6 +1987,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
94313 len = strlen(tmp);
94314 if (len > *size)
94315 len = *size;
94316+ if (len > sizeof(tmp))
94317+ len = sizeof(tmp);
94318 if (copy_to_user(*buf, tmp, len))
94319 return -EFAULT;
94320 *size -= len;
94321@@ -2106,7 +2166,7 @@ int proc_dointvec(struct ctl_table *table, int write,
94322 static int proc_taint(struct ctl_table *table, int write,
94323 void __user *buffer, size_t *lenp, loff_t *ppos)
94324 {
94325- struct ctl_table t;
94326+ ctl_table_no_const t;
94327 unsigned long tmptaint = get_taint();
94328 int err;
94329
94330@@ -2134,7 +2194,6 @@ static int proc_taint(struct ctl_table *table, int write,
94331 return err;
94332 }
94333
94334-#ifdef CONFIG_PRINTK
94335 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94336 void __user *buffer, size_t *lenp, loff_t *ppos)
94337 {
94338@@ -2143,7 +2202,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94339
94340 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
94341 }
94342-#endif
94343
94344 struct do_proc_dointvec_minmax_conv_param {
94345 int *min;
94346@@ -2703,6 +2761,12 @@ int proc_dostring(struct ctl_table *table, int write,
94347 return -ENOSYS;
94348 }
94349
94350+int proc_dostring_modpriv(struct ctl_table *table, int write,
94351+ void __user *buffer, size_t *lenp, loff_t *ppos)
94352+{
94353+ return -ENOSYS;
94354+}
94355+
94356 int proc_dointvec(struct ctl_table *table, int write,
94357 void __user *buffer, size_t *lenp, loff_t *ppos)
94358 {
94359@@ -2759,5 +2823,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94360 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94361 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94362 EXPORT_SYMBOL(proc_dostring);
94363+EXPORT_SYMBOL(proc_dostring_modpriv);
94364 EXPORT_SYMBOL(proc_doulongvec_minmax);
94365 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94366diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94367index 13d2f7c..c93d0b0 100644
94368--- a/kernel/taskstats.c
94369+++ b/kernel/taskstats.c
94370@@ -28,9 +28,12 @@
94371 #include <linux/fs.h>
94372 #include <linux/file.h>
94373 #include <linux/pid_namespace.h>
94374+#include <linux/grsecurity.h>
94375 #include <net/genetlink.h>
94376 #include <linux/atomic.h>
94377
94378+extern int gr_is_taskstats_denied(int pid);
94379+
94380 /*
94381 * Maximum length of a cpumask that can be specified in
94382 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94383@@ -576,6 +579,9 @@ err:
94384
94385 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94386 {
94387+ if (gr_is_taskstats_denied(current->pid))
94388+ return -EACCES;
94389+
94390 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
94391 return cmd_attr_register_cpumask(info);
94392 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
94393diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
94394index a7077d3..dd48a49 100644
94395--- a/kernel/time/alarmtimer.c
94396+++ b/kernel/time/alarmtimer.c
94397@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
94398 struct platform_device *pdev;
94399 int error = 0;
94400 int i;
94401- struct k_clock alarm_clock = {
94402+ static struct k_clock alarm_clock = {
94403 .clock_getres = alarm_clock_getres,
94404 .clock_get = alarm_clock_get,
94405 .timer_create = alarm_timer_create,
94406diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
94407index 1c2fe7d..ce7483d 100644
94408--- a/kernel/time/hrtimer.c
94409+++ b/kernel/time/hrtimer.c
94410@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
94411 local_irq_restore(flags);
94412 }
94413
94414-static void run_hrtimer_softirq(struct softirq_action *h)
94415+static __latent_entropy void run_hrtimer_softirq(void)
94416 {
94417 hrtimer_peek_ahead_timers();
94418 }
94419diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
94420index 3b89464..5e38379 100644
94421--- a/kernel/time/posix-cpu-timers.c
94422+++ b/kernel/time/posix-cpu-timers.c
94423@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
94424
94425 static __init int init_posix_cpu_timers(void)
94426 {
94427- struct k_clock process = {
94428+ static struct k_clock process = {
94429 .clock_getres = process_cpu_clock_getres,
94430 .clock_get = process_cpu_clock_get,
94431 .timer_create = process_cpu_timer_create,
94432 .nsleep = process_cpu_nsleep,
94433 .nsleep_restart = process_cpu_nsleep_restart,
94434 };
94435- struct k_clock thread = {
94436+ static struct k_clock thread = {
94437 .clock_getres = thread_cpu_clock_getres,
94438 .clock_get = thread_cpu_clock_get,
94439 .timer_create = thread_cpu_timer_create,
94440diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
94441index 42b463a..a6b008f 100644
94442--- a/kernel/time/posix-timers.c
94443+++ b/kernel/time/posix-timers.c
94444@@ -43,6 +43,7 @@
94445 #include <linux/hash.h>
94446 #include <linux/posix-clock.h>
94447 #include <linux/posix-timers.h>
94448+#include <linux/grsecurity.h>
94449 #include <linux/syscalls.h>
94450 #include <linux/wait.h>
94451 #include <linux/workqueue.h>
94452@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
94453 * which we beg off on and pass to do_sys_settimeofday().
94454 */
94455
94456-static struct k_clock posix_clocks[MAX_CLOCKS];
94457+static struct k_clock *posix_clocks[MAX_CLOCKS];
94458
94459 /*
94460 * These ones are defined below.
94461@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
94462 */
94463 static __init int init_posix_timers(void)
94464 {
94465- struct k_clock clock_realtime = {
94466+ static struct k_clock clock_realtime = {
94467 .clock_getres = hrtimer_get_res,
94468 .clock_get = posix_clock_realtime_get,
94469 .clock_set = posix_clock_realtime_set,
94470@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
94471 .timer_get = common_timer_get,
94472 .timer_del = common_timer_del,
94473 };
94474- struct k_clock clock_monotonic = {
94475+ static struct k_clock clock_monotonic = {
94476 .clock_getres = hrtimer_get_res,
94477 .clock_get = posix_ktime_get_ts,
94478 .nsleep = common_nsleep,
94479@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
94480 .timer_get = common_timer_get,
94481 .timer_del = common_timer_del,
94482 };
94483- struct k_clock clock_monotonic_raw = {
94484+ static struct k_clock clock_monotonic_raw = {
94485 .clock_getres = hrtimer_get_res,
94486 .clock_get = posix_get_monotonic_raw,
94487 };
94488- struct k_clock clock_realtime_coarse = {
94489+ static struct k_clock clock_realtime_coarse = {
94490 .clock_getres = posix_get_coarse_res,
94491 .clock_get = posix_get_realtime_coarse,
94492 };
94493- struct k_clock clock_monotonic_coarse = {
94494+ static struct k_clock clock_monotonic_coarse = {
94495 .clock_getres = posix_get_coarse_res,
94496 .clock_get = posix_get_monotonic_coarse,
94497 };
94498- struct k_clock clock_tai = {
94499+ static struct k_clock clock_tai = {
94500 .clock_getres = hrtimer_get_res,
94501 .clock_get = posix_get_tai,
94502 .nsleep = common_nsleep,
94503@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
94504 .timer_get = common_timer_get,
94505 .timer_del = common_timer_del,
94506 };
94507- struct k_clock clock_boottime = {
94508+ static struct k_clock clock_boottime = {
94509 .clock_getres = hrtimer_get_res,
94510 .clock_get = posix_get_boottime,
94511 .nsleep = common_nsleep,
94512@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
94513 return;
94514 }
94515
94516- posix_clocks[clock_id] = *new_clock;
94517+ posix_clocks[clock_id] = new_clock;
94518 }
94519 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
94520
94521@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
94522 return (id & CLOCKFD_MASK) == CLOCKFD ?
94523 &clock_posix_dynamic : &clock_posix_cpu;
94524
94525- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
94526+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
94527 return NULL;
94528- return &posix_clocks[id];
94529+ return posix_clocks[id];
94530 }
94531
94532 static int common_timer_create(struct k_itimer *new_timer)
94533@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
94534 struct k_clock *kc = clockid_to_kclock(which_clock);
94535 struct k_itimer *new_timer;
94536 int error, new_timer_id;
94537- sigevent_t event;
94538+ sigevent_t event = { };
94539 int it_id_set = IT_ID_NOT_SET;
94540
94541 if (!kc)
94542@@ -1013,6 +1014,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
94543 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
94544 return -EFAULT;
94545
94546+ /* only the CLOCK_REALTIME clock can be set, all other clocks
94547+ have their clock_set fptr set to a nosettime dummy function
94548+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
94549+ call common_clock_set, which calls do_sys_settimeofday, which
94550+ we hook
94551+ */
94552+
94553 return kc->clock_set(which_clock, &new_tp);
94554 }
94555
94556diff --git a/kernel/time/time.c b/kernel/time/time.c
94557index a9ae20f..d3fbde7 100644
94558--- a/kernel/time/time.c
94559+++ b/kernel/time/time.c
94560@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
94561 return error;
94562
94563 if (tz) {
94564+ /* we log in do_settimeofday called below, so don't log twice
94565+ */
94566+ if (!tv)
94567+ gr_log_timechange();
94568+
94569 sys_tz = *tz;
94570 update_vsyscall_tz();
94571 if (firsttime) {
94572diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94573index ec1791f..6a086cd 100644
94574--- a/kernel/time/timekeeping.c
94575+++ b/kernel/time/timekeeping.c
94576@@ -15,6 +15,7 @@
94577 #include <linux/init.h>
94578 #include <linux/mm.h>
94579 #include <linux/sched.h>
94580+#include <linux/grsecurity.h>
94581 #include <linux/syscore_ops.h>
94582 #include <linux/clocksource.h>
94583 #include <linux/jiffies.h>
94584@@ -717,6 +718,8 @@ int do_settimeofday(const struct timespec *tv)
94585 if (!timespec_valid_strict(tv))
94586 return -EINVAL;
94587
94588+ gr_log_timechange();
94589+
94590 raw_spin_lock_irqsave(&timekeeper_lock, flags);
94591 write_seqcount_begin(&tk_core.seq);
94592
94593diff --git a/kernel/time/timer.c b/kernel/time/timer.c
94594index 9bbb834..3caa8ed 100644
94595--- a/kernel/time/timer.c
94596+++ b/kernel/time/timer.c
94597@@ -1394,7 +1394,7 @@ void update_process_times(int user_tick)
94598 /*
94599 * This function runs timers and the timer-tq in bottom half context.
94600 */
94601-static void run_timer_softirq(struct softirq_action *h)
94602+static __latent_entropy void run_timer_softirq(void)
94603 {
94604 struct tvec_base *base = __this_cpu_read(tvec_bases);
94605
94606@@ -1457,7 +1457,7 @@ static void process_timeout(unsigned long __data)
94607 *
94608 * In all cases the return value is guaranteed to be non-negative.
94609 */
94610-signed long __sched schedule_timeout(signed long timeout)
94611+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
94612 {
94613 struct timer_list timer;
94614 unsigned long expire;
94615diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94616index 61ed862..3b52c65 100644
94617--- a/kernel/time/timer_list.c
94618+++ b/kernel/time/timer_list.c
94619@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94620
94621 static void print_name_offset(struct seq_file *m, void *sym)
94622 {
94623+#ifdef CONFIG_GRKERNSEC_HIDESYM
94624+ SEQ_printf(m, "<%p>", NULL);
94625+#else
94626 char symname[KSYM_NAME_LEN];
94627
94628 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94629 SEQ_printf(m, "<%pK>", sym);
94630 else
94631 SEQ_printf(m, "%s", symname);
94632+#endif
94633 }
94634
94635 static void
94636@@ -119,7 +123,11 @@ next_one:
94637 static void
94638 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94639 {
94640+#ifdef CONFIG_GRKERNSEC_HIDESYM
94641+ SEQ_printf(m, " .base: %p\n", NULL);
94642+#else
94643 SEQ_printf(m, " .base: %pK\n", base);
94644+#endif
94645 SEQ_printf(m, " .index: %d\n",
94646 base->index);
94647 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94648@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
94649 {
94650 struct proc_dir_entry *pe;
94651
94652+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94653+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94654+#else
94655 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94656+#endif
94657 if (!pe)
94658 return -ENOMEM;
94659 return 0;
94660diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94661index 1fb08f2..ca4bb1e 100644
94662--- a/kernel/time/timer_stats.c
94663+++ b/kernel/time/timer_stats.c
94664@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94665 static unsigned long nr_entries;
94666 static struct entry entries[MAX_ENTRIES];
94667
94668-static atomic_t overflow_count;
94669+static atomic_unchecked_t overflow_count;
94670
94671 /*
94672 * The entries are in a hash-table, for fast lookup:
94673@@ -140,7 +140,7 @@ static void reset_entries(void)
94674 nr_entries = 0;
94675 memset(entries, 0, sizeof(entries));
94676 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94677- atomic_set(&overflow_count, 0);
94678+ atomic_set_unchecked(&overflow_count, 0);
94679 }
94680
94681 static struct entry *alloc_entry(void)
94682@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94683 if (likely(entry))
94684 entry->count++;
94685 else
94686- atomic_inc(&overflow_count);
94687+ atomic_inc_unchecked(&overflow_count);
94688
94689 out_unlock:
94690 raw_spin_unlock_irqrestore(lock, flags);
94691@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94692
94693 static void print_name_offset(struct seq_file *m, unsigned long addr)
94694 {
94695+#ifdef CONFIG_GRKERNSEC_HIDESYM
94696+ seq_printf(m, "<%p>", NULL);
94697+#else
94698 char symname[KSYM_NAME_LEN];
94699
94700 if (lookup_symbol_name(addr, symname) < 0)
94701- seq_printf(m, "<%p>", (void *)addr);
94702+ seq_printf(m, "<%pK>", (void *)addr);
94703 else
94704 seq_printf(m, "%s", symname);
94705+#endif
94706 }
94707
94708 static int tstats_show(struct seq_file *m, void *v)
94709@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
94710
94711 seq_puts(m, "Timer Stats Version: v0.3\n");
94712 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94713- if (atomic_read(&overflow_count))
94714- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
94715+ if (atomic_read_unchecked(&overflow_count))
94716+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
94717 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
94718
94719 for (i = 0; i < nr_entries; i++) {
94720@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
94721 {
94722 struct proc_dir_entry *pe;
94723
94724+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94725+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94726+#else
94727 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94728+#endif
94729 if (!pe)
94730 return -ENOMEM;
94731 return 0;
94732diff --git a/kernel/torture.c b/kernel/torture.c
94733index d600af2..27a4e9d 100644
94734--- a/kernel/torture.c
94735+++ b/kernel/torture.c
94736@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
94737 mutex_lock(&fullstop_mutex);
94738 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
94739 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
94740- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
94741+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
94742 } else {
94743 pr_warn("Concurrent rmmod and shutdown illegal!\n");
94744 }
94745@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
94746 if (!torture_must_stop()) {
94747 if (stutter > 1) {
94748 schedule_timeout_interruptible(stutter - 1);
94749- ACCESS_ONCE(stutter_pause_test) = 2;
94750+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
94751 }
94752 schedule_timeout_interruptible(1);
94753- ACCESS_ONCE(stutter_pause_test) = 1;
94754+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
94755 }
94756 if (!torture_must_stop())
94757 schedule_timeout_interruptible(stutter);
94758- ACCESS_ONCE(stutter_pause_test) = 0;
94759+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
94760 torture_shutdown_absorb("torture_stutter");
94761 } while (!torture_must_stop());
94762 torture_kthread_stopping("torture_stutter");
94763@@ -645,7 +645,7 @@ bool torture_cleanup(void)
94764 schedule_timeout_uninterruptible(10);
94765 return true;
94766 }
94767- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
94768+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
94769 mutex_unlock(&fullstop_mutex);
94770 torture_shutdown_cleanup();
94771 torture_shuffle_cleanup();
94772diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94773index c1bd4ad..4b861dc 100644
94774--- a/kernel/trace/blktrace.c
94775+++ b/kernel/trace/blktrace.c
94776@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94777 struct blk_trace *bt = filp->private_data;
94778 char buf[16];
94779
94780- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94781+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94782
94783 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94784 }
94785@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94786 return 1;
94787
94788 bt = buf->chan->private_data;
94789- atomic_inc(&bt->dropped);
94790+ atomic_inc_unchecked(&bt->dropped);
94791 return 0;
94792 }
94793
94794@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94795
94796 bt->dir = dir;
94797 bt->dev = dev;
94798- atomic_set(&bt->dropped, 0);
94799+ atomic_set_unchecked(&bt->dropped, 0);
94800 INIT_LIST_HEAD(&bt->running_list);
94801
94802 ret = -EIO;
94803diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94804index 5916a8e..5cd3b1f 100644
94805--- a/kernel/trace/ftrace.c
94806+++ b/kernel/trace/ftrace.c
94807@@ -2128,12 +2128,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94808 if (unlikely(ftrace_disabled))
94809 return 0;
94810
94811+ ret = ftrace_arch_code_modify_prepare();
94812+ FTRACE_WARN_ON(ret);
94813+ if (ret)
94814+ return 0;
94815+
94816 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94817+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94818 if (ret) {
94819 ftrace_bug(ret, ip);
94820- return 0;
94821 }
94822- return 1;
94823+ return ret ? 0 : 1;
94824 }
94825
94826 /*
94827@@ -4458,8 +4463,10 @@ static int ftrace_process_locs(struct module *mod,
94828 if (!count)
94829 return 0;
94830
94831+ pax_open_kernel();
94832 sort(start, count, sizeof(*start),
94833 ftrace_cmp_ips, ftrace_swap_ips);
94834+ pax_close_kernel();
94835
94836 start_pg = ftrace_allocate_pages(count);
94837 if (!start_pg)
94838diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94839index 2d75c94..5ef6d32 100644
94840--- a/kernel/trace/ring_buffer.c
94841+++ b/kernel/trace/ring_buffer.c
94842@@ -352,9 +352,9 @@ struct buffer_data_page {
94843 */
94844 struct buffer_page {
94845 struct list_head list; /* list of buffer pages */
94846- local_t write; /* index for next write */
94847+ local_unchecked_t write; /* index for next write */
94848 unsigned read; /* index for next read */
94849- local_t entries; /* entries on this page */
94850+ local_unchecked_t entries; /* entries on this page */
94851 unsigned long real_end; /* real end of data */
94852 struct buffer_data_page *page; /* Actual data page */
94853 };
94854@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
94855 unsigned long last_overrun;
94856 local_t entries_bytes;
94857 local_t entries;
94858- local_t overrun;
94859- local_t commit_overrun;
94860+ local_unchecked_t overrun;
94861+ local_unchecked_t commit_overrun;
94862 local_t dropped_events;
94863 local_t committing;
94864 local_t commits;
94865@@ -1005,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94866 *
94867 * We add a counter to the write field to denote this.
94868 */
94869- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94870- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94871+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94872+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94873
94874 /*
94875 * Just make sure we have seen our old_write and synchronize
94876@@ -1034,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94877 * cmpxchg to only update if an interrupt did not already
94878 * do it for us. If the cmpxchg fails, we don't care.
94879 */
94880- (void)local_cmpxchg(&next_page->write, old_write, val);
94881- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94882+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94883+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94884
94885 /*
94886 * No need to worry about races with clearing out the commit.
94887@@ -1402,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94888
94889 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94890 {
94891- return local_read(&bpage->entries) & RB_WRITE_MASK;
94892+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94893 }
94894
94895 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94896 {
94897- return local_read(&bpage->write) & RB_WRITE_MASK;
94898+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94899 }
94900
94901 static int
94902@@ -1502,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94903 * bytes consumed in ring buffer from here.
94904 * Increment overrun to account for the lost events.
94905 */
94906- local_add(page_entries, &cpu_buffer->overrun);
94907+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94908 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94909 }
94910
94911@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94912 * it is our responsibility to update
94913 * the counters.
94914 */
94915- local_add(entries, &cpu_buffer->overrun);
94916+ local_add_unchecked(entries, &cpu_buffer->overrun);
94917 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94918
94919 /*
94920@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94921 if (tail == BUF_PAGE_SIZE)
94922 tail_page->real_end = 0;
94923
94924- local_sub(length, &tail_page->write);
94925+ local_sub_unchecked(length, &tail_page->write);
94926 return;
94927 }
94928
94929@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94930 rb_event_set_padding(event);
94931
94932 /* Set the write back to the previous setting */
94933- local_sub(length, &tail_page->write);
94934+ local_sub_unchecked(length, &tail_page->write);
94935 return;
94936 }
94937
94938@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94939
94940 /* Set write to end of buffer */
94941 length = (tail + length) - BUF_PAGE_SIZE;
94942- local_sub(length, &tail_page->write);
94943+ local_sub_unchecked(length, &tail_page->write);
94944 }
94945
94946 /*
94947@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94948 * about it.
94949 */
94950 if (unlikely(next_page == commit_page)) {
94951- local_inc(&cpu_buffer->commit_overrun);
94952+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94953 goto out_reset;
94954 }
94955
94956@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94957 cpu_buffer->tail_page) &&
94958 (cpu_buffer->commit_page ==
94959 cpu_buffer->reader_page))) {
94960- local_inc(&cpu_buffer->commit_overrun);
94961+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94962 goto out_reset;
94963 }
94964 }
94965@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94966 length += RB_LEN_TIME_EXTEND;
94967
94968 tail_page = cpu_buffer->tail_page;
94969- write = local_add_return(length, &tail_page->write);
94970+ write = local_add_return_unchecked(length, &tail_page->write);
94971
94972 /* set write to only the index of the write */
94973 write &= RB_WRITE_MASK;
94974@@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94975 kmemcheck_annotate_bitfield(event, bitfield);
94976 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94977
94978- local_inc(&tail_page->entries);
94979+ local_inc_unchecked(&tail_page->entries);
94980
94981 /*
94982 * If this is the first commit on the page, then update
94983@@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94984
94985 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94986 unsigned long write_mask =
94987- local_read(&bpage->write) & ~RB_WRITE_MASK;
94988+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94989 unsigned long event_length = rb_event_length(event);
94990 /*
94991 * This is on the tail page. It is possible that
94992@@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94993 */
94994 old_index += write_mask;
94995 new_index += write_mask;
94996- index = local_cmpxchg(&bpage->write, old_index, new_index);
94997+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94998 if (index == old_index) {
94999 /* update counters */
95000 local_sub(event_length, &cpu_buffer->entries_bytes);
95001@@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95002
95003 /* Do the likely case first */
95004 if (likely(bpage->page == (void *)addr)) {
95005- local_dec(&bpage->entries);
95006+ local_dec_unchecked(&bpage->entries);
95007 return;
95008 }
95009
95010@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95011 start = bpage;
95012 do {
95013 if (bpage->page == (void *)addr) {
95014- local_dec(&bpage->entries);
95015+ local_dec_unchecked(&bpage->entries);
95016 return;
95017 }
95018 rb_inc_page(cpu_buffer, &bpage);
95019@@ -3146,7 +3146,7 @@ static inline unsigned long
95020 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
95021 {
95022 return local_read(&cpu_buffer->entries) -
95023- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
95024+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
95025 }
95026
95027 /**
95028@@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
95029 return 0;
95030
95031 cpu_buffer = buffer->buffers[cpu];
95032- ret = local_read(&cpu_buffer->overrun);
95033+ ret = local_read_unchecked(&cpu_buffer->overrun);
95034
95035 return ret;
95036 }
95037@@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
95038 return 0;
95039
95040 cpu_buffer = buffer->buffers[cpu];
95041- ret = local_read(&cpu_buffer->commit_overrun);
95042+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
95043
95044 return ret;
95045 }
95046@@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
95047 /* if you care about this being correct, lock the buffer */
95048 for_each_buffer_cpu(buffer, cpu) {
95049 cpu_buffer = buffer->buffers[cpu];
95050- overruns += local_read(&cpu_buffer->overrun);
95051+ overruns += local_read_unchecked(&cpu_buffer->overrun);
95052 }
95053
95054 return overruns;
95055@@ -3514,8 +3514,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95056 /*
95057 * Reset the reader page to size zero.
95058 */
95059- local_set(&cpu_buffer->reader_page->write, 0);
95060- local_set(&cpu_buffer->reader_page->entries, 0);
95061+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95062+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95063 local_set(&cpu_buffer->reader_page->page->commit, 0);
95064 cpu_buffer->reader_page->real_end = 0;
95065
95066@@ -3549,7 +3549,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95067 * want to compare with the last_overrun.
95068 */
95069 smp_mb();
95070- overwrite = local_read(&(cpu_buffer->overrun));
95071+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
95072
95073 /*
95074 * Here's the tricky part.
95075@@ -4121,8 +4121,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95076
95077 cpu_buffer->head_page
95078 = list_entry(cpu_buffer->pages, struct buffer_page, list);
95079- local_set(&cpu_buffer->head_page->write, 0);
95080- local_set(&cpu_buffer->head_page->entries, 0);
95081+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
95082+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
95083 local_set(&cpu_buffer->head_page->page->commit, 0);
95084
95085 cpu_buffer->head_page->read = 0;
95086@@ -4132,14 +4132,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95087
95088 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
95089 INIT_LIST_HEAD(&cpu_buffer->new_pages);
95090- local_set(&cpu_buffer->reader_page->write, 0);
95091- local_set(&cpu_buffer->reader_page->entries, 0);
95092+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95093+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95094 local_set(&cpu_buffer->reader_page->page->commit, 0);
95095 cpu_buffer->reader_page->read = 0;
95096
95097 local_set(&cpu_buffer->entries_bytes, 0);
95098- local_set(&cpu_buffer->overrun, 0);
95099- local_set(&cpu_buffer->commit_overrun, 0);
95100+ local_set_unchecked(&cpu_buffer->overrun, 0);
95101+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
95102 local_set(&cpu_buffer->dropped_events, 0);
95103 local_set(&cpu_buffer->entries, 0);
95104 local_set(&cpu_buffer->committing, 0);
95105@@ -4544,8 +4544,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
95106 rb_init_page(bpage);
95107 bpage = reader->page;
95108 reader->page = *data_page;
95109- local_set(&reader->write, 0);
95110- local_set(&reader->entries, 0);
95111+ local_set_unchecked(&reader->write, 0);
95112+ local_set_unchecked(&reader->entries, 0);
95113 reader->read = 0;
95114 *data_page = bpage;
95115
95116diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95117index 8a52839..dd6d7c8 100644
95118--- a/kernel/trace/trace.c
95119+++ b/kernel/trace/trace.c
95120@@ -3487,7 +3487,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
95121 return 0;
95122 }
95123
95124-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
95125+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
95126 {
95127 /* do nothing if flag is already set */
95128 if (!!(trace_flags & mask) == !!enabled)
95129diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
95130index 385391f..8d2250f 100644
95131--- a/kernel/trace/trace.h
95132+++ b/kernel/trace/trace.h
95133@@ -1280,7 +1280,7 @@ extern const char *__stop___tracepoint_str[];
95134 void trace_printk_init_buffers(void);
95135 void trace_printk_start_comm(void);
95136 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
95137-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
95138+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
95139
95140 /*
95141 * Normal trace_printk() and friends allocates special buffers
95142diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
95143index 57b67b1..66082a9 100644
95144--- a/kernel/trace/trace_clock.c
95145+++ b/kernel/trace/trace_clock.c
95146@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
95147 return now;
95148 }
95149
95150-static atomic64_t trace_counter;
95151+static atomic64_unchecked_t trace_counter;
95152
95153 /*
95154 * trace_clock_counter(): simply an atomic counter.
95155@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
95156 */
95157 u64 notrace trace_clock_counter(void)
95158 {
95159- return atomic64_add_return(1, &trace_counter);
95160+ return atomic64_inc_return_unchecked(&trace_counter);
95161 }
95162diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95163index ef06ce7..3ea161d 100644
95164--- a/kernel/trace/trace_events.c
95165+++ b/kernel/trace/trace_events.c
95166@@ -1720,7 +1720,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
95167 return 0;
95168 }
95169
95170-struct ftrace_module_file_ops;
95171 static void __add_event_to_tracers(struct ftrace_event_call *call);
95172
95173 /* Add an additional event_call dynamically */
95174diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95175index 0abd9b8..6a663a2 100644
95176--- a/kernel/trace/trace_mmiotrace.c
95177+++ b/kernel/trace/trace_mmiotrace.c
95178@@ -24,7 +24,7 @@ struct header_iter {
95179 static struct trace_array *mmio_trace_array;
95180 static bool overrun_detected;
95181 static unsigned long prev_overruns;
95182-static atomic_t dropped_count;
95183+static atomic_unchecked_t dropped_count;
95184
95185 static void mmio_reset_data(struct trace_array *tr)
95186 {
95187@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
95188
95189 static unsigned long count_overruns(struct trace_iterator *iter)
95190 {
95191- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95192+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95193 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
95194
95195 if (over > prev_overruns)
95196@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95197 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95198 sizeof(*entry), 0, pc);
95199 if (!event) {
95200- atomic_inc(&dropped_count);
95201+ atomic_inc_unchecked(&dropped_count);
95202 return;
95203 }
95204 entry = ring_buffer_event_data(event);
95205@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95206 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95207 sizeof(*entry), 0, pc);
95208 if (!event) {
95209- atomic_inc(&dropped_count);
95210+ atomic_inc_unchecked(&dropped_count);
95211 return;
95212 }
95213 entry = ring_buffer_event_data(event);
95214diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95215index c6977d5..d243785 100644
95216--- a/kernel/trace/trace_output.c
95217+++ b/kernel/trace/trace_output.c
95218@@ -712,14 +712,16 @@ int register_ftrace_event(struct trace_event *event)
95219 goto out;
95220 }
95221
95222+ pax_open_kernel();
95223 if (event->funcs->trace == NULL)
95224- event->funcs->trace = trace_nop_print;
95225+ *(void **)&event->funcs->trace = trace_nop_print;
95226 if (event->funcs->raw == NULL)
95227- event->funcs->raw = trace_nop_print;
95228+ *(void **)&event->funcs->raw = trace_nop_print;
95229 if (event->funcs->hex == NULL)
95230- event->funcs->hex = trace_nop_print;
95231+ *(void **)&event->funcs->hex = trace_nop_print;
95232 if (event->funcs->binary == NULL)
95233- event->funcs->binary = trace_nop_print;
95234+ *(void **)&event->funcs->binary = trace_nop_print;
95235+ pax_close_kernel();
95236
95237 key = event->type & (EVENT_HASHSIZE - 1);
95238
95239diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
95240index 1f24ed9..10407ec 100644
95241--- a/kernel/trace/trace_seq.c
95242+++ b/kernel/trace/trace_seq.c
95243@@ -367,7 +367,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
95244
95245 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95246 if (!IS_ERR(p)) {
95247- p = mangle_path(s->buffer + s->len, p, "\n");
95248+ p = mangle_path(s->buffer + s->len, p, "\n\\");
95249 if (p) {
95250 s->len = p - s->buffer;
95251 return 1;
95252diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95253index 8a4e5cb..64f270d 100644
95254--- a/kernel/trace/trace_stack.c
95255+++ b/kernel/trace/trace_stack.c
95256@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
95257 return;
95258
95259 /* we do not handle interrupt stacks yet */
95260- if (!object_is_on_stack(stack))
95261+ if (!object_starts_on_stack(stack))
95262 return;
95263
95264 local_irq_save(flags);
95265diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
95266index aa312b0..395f343 100644
95267--- a/kernel/user_namespace.c
95268+++ b/kernel/user_namespace.c
95269@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
95270 !kgid_has_mapping(parent_ns, group))
95271 return -EPERM;
95272
95273+#ifdef CONFIG_GRKERNSEC
95274+ /*
95275+ * This doesn't really inspire confidence:
95276+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
95277+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
95278+ * Increases kernel attack surface in areas developers
95279+ * previously cared little about ("low importance due
95280+ * to requiring "root" capability")
95281+ * To be removed when this code receives *proper* review
95282+ */
95283+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
95284+ !capable(CAP_SETGID))
95285+ return -EPERM;
95286+#endif
95287+
95288 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
95289 if (!ns)
95290 return -ENOMEM;
95291@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
95292 if (atomic_read(&current->mm->mm_users) > 1)
95293 return -EINVAL;
95294
95295- if (current->fs->users != 1)
95296+ if (atomic_read(&current->fs->users) != 1)
95297 return -EINVAL;
95298
95299 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
95300diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
95301index c8eac43..4b5f08f 100644
95302--- a/kernel/utsname_sysctl.c
95303+++ b/kernel/utsname_sysctl.c
95304@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
95305 static int proc_do_uts_string(struct ctl_table *table, int write,
95306 void __user *buffer, size_t *lenp, loff_t *ppos)
95307 {
95308- struct ctl_table uts_table;
95309+ ctl_table_no_const uts_table;
95310 int r;
95311 memcpy(&uts_table, table, sizeof(uts_table));
95312 uts_table.data = get_uts(table, write);
95313diff --git a/kernel/watchdog.c b/kernel/watchdog.c
95314index a8d6914..8fbdb13 100644
95315--- a/kernel/watchdog.c
95316+++ b/kernel/watchdog.c
95317@@ -521,7 +521,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
95318 static void watchdog_nmi_disable(unsigned int cpu) { return; }
95319 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
95320
95321-static struct smp_hotplug_thread watchdog_threads = {
95322+static struct smp_hotplug_thread watchdog_threads __read_only = {
95323 .store = &softlockup_watchdog,
95324 .thread_should_run = watchdog_should_run,
95325 .thread_fn = watchdog,
95326diff --git a/kernel/workqueue.c b/kernel/workqueue.c
95327index 5dbe22a..872413c 100644
95328--- a/kernel/workqueue.c
95329+++ b/kernel/workqueue.c
95330@@ -4507,7 +4507,7 @@ static void rebind_workers(struct worker_pool *pool)
95331 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
95332 worker_flags |= WORKER_REBOUND;
95333 worker_flags &= ~WORKER_UNBOUND;
95334- ACCESS_ONCE(worker->flags) = worker_flags;
95335+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
95336 }
95337
95338 spin_unlock_irq(&pool->lock);
95339diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95340index a285900..5e3b26b 100644
95341--- a/lib/Kconfig.debug
95342+++ b/lib/Kconfig.debug
95343@@ -882,7 +882,7 @@ config DEBUG_MUTEXES
95344
95345 config DEBUG_WW_MUTEX_SLOWPATH
95346 bool "Wait/wound mutex debugging: Slowpath testing"
95347- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95348+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95349 select DEBUG_LOCK_ALLOC
95350 select DEBUG_SPINLOCK
95351 select DEBUG_MUTEXES
95352@@ -899,7 +899,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
95353
95354 config DEBUG_LOCK_ALLOC
95355 bool "Lock debugging: detect incorrect freeing of live locks"
95356- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95357+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95358 select DEBUG_SPINLOCK
95359 select DEBUG_MUTEXES
95360 select LOCKDEP
95361@@ -913,7 +913,7 @@ config DEBUG_LOCK_ALLOC
95362
95363 config PROVE_LOCKING
95364 bool "Lock debugging: prove locking correctness"
95365- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95366+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95367 select LOCKDEP
95368 select DEBUG_SPINLOCK
95369 select DEBUG_MUTEXES
95370@@ -964,7 +964,7 @@ config LOCKDEP
95371
95372 config LOCK_STAT
95373 bool "Lock usage statistics"
95374- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95375+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95376 select LOCKDEP
95377 select DEBUG_SPINLOCK
95378 select DEBUG_MUTEXES
95379@@ -1437,6 +1437,7 @@ config LATENCYTOP
95380 depends on DEBUG_KERNEL
95381 depends on STACKTRACE_SUPPORT
95382 depends on PROC_FS
95383+ depends on !GRKERNSEC_HIDESYM
95384 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
95385 select KALLSYMS
95386 select KALLSYMS_ALL
95387@@ -1453,7 +1454,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95388 config DEBUG_STRICT_USER_COPY_CHECKS
95389 bool "Strict user copy size checks"
95390 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95391- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
95392+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
95393 help
95394 Enabling this option turns a certain set of sanity checks for user
95395 copy operations into compile time failures.
95396@@ -1581,7 +1582,7 @@ endmenu # runtime tests
95397
95398 config PROVIDE_OHCI1394_DMA_INIT
95399 bool "Remote debugging over FireWire early on boot"
95400- depends on PCI && X86
95401+ depends on PCI && X86 && !GRKERNSEC
95402 help
95403 If you want to debug problems which hang or crash the kernel early
95404 on boot and the crashing machine has a FireWire port, you can use
95405diff --git a/lib/Makefile b/lib/Makefile
95406index d6b4bc4..a3724eb 100644
95407--- a/lib/Makefile
95408+++ b/lib/Makefile
95409@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
95410 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
95411 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
95412 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
95413-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
95414+obj-y += list_debug.o
95415 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
95416
95417 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
95418diff --git a/lib/average.c b/lib/average.c
95419index 114d1be..ab0350c 100644
95420--- a/lib/average.c
95421+++ b/lib/average.c
95422@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
95423 {
95424 unsigned long internal = ACCESS_ONCE(avg->internal);
95425
95426- ACCESS_ONCE(avg->internal) = internal ?
95427+ ACCESS_ONCE_RW(avg->internal) = internal ?
95428 (((internal << avg->weight) - internal) +
95429 (val << avg->factor)) >> avg->weight :
95430 (val << avg->factor);
95431diff --git a/lib/bitmap.c b/lib/bitmap.c
95432index 1e031f2..89e3d6f 100644
95433--- a/lib/bitmap.c
95434+++ b/lib/bitmap.c
95435@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
95436 lower = src[off + k];
95437 if (left && off + k == lim - 1)
95438 lower &= mask;
95439- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
95440+ dst[k] = lower >> rem;
95441+ if (rem)
95442+ dst[k] |= upper << (BITS_PER_LONG - rem);
95443 if (left && k == lim - 1)
95444 dst[k] &= mask;
95445 }
95446@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
95447 upper = src[k];
95448 if (left && k == lim - 1)
95449 upper &= (1UL << left) - 1;
95450- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
95451+ dst[k + off] = upper << rem;
95452+ if (rem)
95453+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
95454 if (left && k + off == lim - 1)
95455 dst[k + off] &= (1UL << left) - 1;
95456 }
95457@@ -429,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95458 {
95459 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95460 u32 chunk;
95461- const char __user __force *ubuf = (const char __user __force *)buf;
95462+ const char __user *ubuf = (const char __force_user *)buf;
95463
95464 bitmap_zero(maskp, nmaskbits);
95465
95466@@ -514,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
95467 {
95468 if (!access_ok(VERIFY_READ, ubuf, ulen))
95469 return -EFAULT;
95470- return __bitmap_parse((const char __force *)ubuf,
95471+ return __bitmap_parse((const char __force_kernel *)ubuf,
95472 ulen, 1, maskp, nmaskbits);
95473
95474 }
95475@@ -605,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
95476 {
95477 unsigned a, b;
95478 int c, old_c, totaldigits;
95479- const char __user __force *ubuf = (const char __user __force *)buf;
95480+ const char __user *ubuf = (const char __force_user *)buf;
95481 int exp_digit, in_range;
95482
95483 totaldigits = c = 0;
95484@@ -700,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
95485 {
95486 if (!access_ok(VERIFY_READ, ubuf, ulen))
95487 return -EFAULT;
95488- return __bitmap_parselist((const char __force *)ubuf,
95489+ return __bitmap_parselist((const char __force_kernel *)ubuf,
95490 ulen, 1, maskp, nmaskbits);
95491 }
95492 EXPORT_SYMBOL(bitmap_parselist_user);
95493diff --git a/lib/bug.c b/lib/bug.c
95494index d1d7c78..b354235 100644
95495--- a/lib/bug.c
95496+++ b/lib/bug.c
95497@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95498 return BUG_TRAP_TYPE_NONE;
95499
95500 bug = find_bug(bugaddr);
95501+ if (!bug)
95502+ return BUG_TRAP_TYPE_NONE;
95503
95504 file = NULL;
95505 line = 0;
95506diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95507index 547f7f9..a6d4ba0 100644
95508--- a/lib/debugobjects.c
95509+++ b/lib/debugobjects.c
95510@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95511 if (limit > 4)
95512 return;
95513
95514- is_on_stack = object_is_on_stack(addr);
95515+ is_on_stack = object_starts_on_stack(addr);
95516 if (is_on_stack == onstack)
95517 return;
95518
95519diff --git a/lib/div64.c b/lib/div64.c
95520index 4382ad7..08aa558 100644
95521--- a/lib/div64.c
95522+++ b/lib/div64.c
95523@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
95524 EXPORT_SYMBOL(__div64_32);
95525
95526 #ifndef div_s64_rem
95527-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95528+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95529 {
95530 u64 quotient;
95531
95532@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
95533 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
95534 */
95535 #ifndef div64_u64
95536-u64 div64_u64(u64 dividend, u64 divisor)
95537+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
95538 {
95539 u32 high = divisor >> 32;
95540 u64 quot;
95541diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95542index 98f2d7e..899da5c 100644
95543--- a/lib/dma-debug.c
95544+++ b/lib/dma-debug.c
95545@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
95546
95547 void dma_debug_add_bus(struct bus_type *bus)
95548 {
95549- struct notifier_block *nb;
95550+ notifier_block_no_const *nb;
95551
95552 if (global_disable)
95553 return;
95554@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
95555
95556 static void check_for_stack(struct device *dev, void *addr)
95557 {
95558- if (object_is_on_stack(addr))
95559+ if (object_starts_on_stack(addr))
95560 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95561 "stack [addr=%p]\n", addr);
95562 }
95563diff --git a/lib/hash.c b/lib/hash.c
95564index fea973f..386626f 100644
95565--- a/lib/hash.c
95566+++ b/lib/hash.c
95567@@ -14,7 +14,7 @@
95568 #include <linux/hash.h>
95569 #include <linux/cache.h>
95570
95571-static struct fast_hash_ops arch_hash_ops __read_mostly = {
95572+static struct fast_hash_ops arch_hash_ops __read_only = {
95573 .hash = jhash,
95574 .hash2 = jhash2,
95575 };
95576diff --git a/lib/inflate.c b/lib/inflate.c
95577index 013a761..c28f3fc 100644
95578--- a/lib/inflate.c
95579+++ b/lib/inflate.c
95580@@ -269,7 +269,7 @@ static void free(void *where)
95581 malloc_ptr = free_mem_ptr;
95582 }
95583 #else
95584-#define malloc(a) kmalloc(a, GFP_KERNEL)
95585+#define malloc(a) kmalloc((a), GFP_KERNEL)
95586 #define free(a) kfree(a)
95587 #endif
95588
95589diff --git a/lib/ioremap.c b/lib/ioremap.c
95590index 0c9216c..863bd89 100644
95591--- a/lib/ioremap.c
95592+++ b/lib/ioremap.c
95593@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
95594 unsigned long next;
95595
95596 phys_addr -= addr;
95597- pmd = pmd_alloc(&init_mm, pud, addr);
95598+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95599 if (!pmd)
95600 return -ENOMEM;
95601 do {
95602@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
95603 unsigned long next;
95604
95605 phys_addr -= addr;
95606- pud = pud_alloc(&init_mm, pgd, addr);
95607+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95608 if (!pud)
95609 return -ENOMEM;
95610 do {
95611diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95612index bd2bea9..6b3c95e 100644
95613--- a/lib/is_single_threaded.c
95614+++ b/lib/is_single_threaded.c
95615@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95616 struct task_struct *p, *t;
95617 bool ret;
95618
95619+ if (!mm)
95620+ return true;
95621+
95622 if (atomic_read(&task->signal->live) != 1)
95623 return false;
95624
95625diff --git a/lib/kobject.c b/lib/kobject.c
95626index 58751bb..93a1853 100644
95627--- a/lib/kobject.c
95628+++ b/lib/kobject.c
95629@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
95630
95631
95632 static DEFINE_SPINLOCK(kobj_ns_type_lock);
95633-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
95634+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
95635
95636-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95637+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95638 {
95639 enum kobj_ns_type type = ops->type;
95640 int error;
95641diff --git a/lib/list_debug.c b/lib/list_debug.c
95642index c24c2f7..f0296f4 100644
95643--- a/lib/list_debug.c
95644+++ b/lib/list_debug.c
95645@@ -11,7 +11,9 @@
95646 #include <linux/bug.h>
95647 #include <linux/kernel.h>
95648 #include <linux/rculist.h>
95649+#include <linux/mm.h>
95650
95651+#ifdef CONFIG_DEBUG_LIST
95652 /*
95653 * Insert a new entry between two known consecutive entries.
95654 *
95655@@ -19,21 +21,40 @@
95656 * the prev/next entries already!
95657 */
95658
95659+static bool __list_add_debug(struct list_head *new,
95660+ struct list_head *prev,
95661+ struct list_head *next)
95662+{
95663+ if (unlikely(next->prev != prev)) {
95664+ printk(KERN_ERR "list_add corruption. next->prev should be "
95665+ "prev (%p), but was %p. (next=%p).\n",
95666+ prev, next->prev, next);
95667+ BUG();
95668+ return false;
95669+ }
95670+ if (unlikely(prev->next != next)) {
95671+ printk(KERN_ERR "list_add corruption. prev->next should be "
95672+ "next (%p), but was %p. (prev=%p).\n",
95673+ next, prev->next, prev);
95674+ BUG();
95675+ return false;
95676+ }
95677+ if (unlikely(new == prev || new == next)) {
95678+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
95679+ new, prev, next);
95680+ BUG();
95681+ return false;
95682+ }
95683+ return true;
95684+}
95685+
95686 void __list_add(struct list_head *new,
95687- struct list_head *prev,
95688- struct list_head *next)
95689+ struct list_head *prev,
95690+ struct list_head *next)
95691 {
95692- WARN(next->prev != prev,
95693- "list_add corruption. next->prev should be "
95694- "prev (%p), but was %p. (next=%p).\n",
95695- prev, next->prev, next);
95696- WARN(prev->next != next,
95697- "list_add corruption. prev->next should be "
95698- "next (%p), but was %p. (prev=%p).\n",
95699- next, prev->next, prev);
95700- WARN(new == prev || new == next,
95701- "list_add double add: new=%p, prev=%p, next=%p.\n",
95702- new, prev, next);
95703+ if (!__list_add_debug(new, prev, next))
95704+ return;
95705+
95706 next->prev = new;
95707 new->next = next;
95708 new->prev = prev;
95709@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95710 }
95711 EXPORT_SYMBOL(__list_add);
95712
95713-void __list_del_entry(struct list_head *entry)
95714+static bool __list_del_entry_debug(struct list_head *entry)
95715 {
95716 struct list_head *prev, *next;
95717
95718 prev = entry->prev;
95719 next = entry->next;
95720
95721- if (WARN(next == LIST_POISON1,
95722- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95723- entry, LIST_POISON1) ||
95724- WARN(prev == LIST_POISON2,
95725- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95726- entry, LIST_POISON2) ||
95727- WARN(prev->next != entry,
95728- "list_del corruption. prev->next should be %p, "
95729- "but was %p\n", entry, prev->next) ||
95730- WARN(next->prev != entry,
95731- "list_del corruption. next->prev should be %p, "
95732- "but was %p\n", entry, next->prev))
95733+ if (unlikely(next == LIST_POISON1)) {
95734+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95735+ entry, LIST_POISON1);
95736+ BUG();
95737+ return false;
95738+ }
95739+ if (unlikely(prev == LIST_POISON2)) {
95740+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95741+ entry, LIST_POISON2);
95742+ BUG();
95743+ return false;
95744+ }
95745+ if (unlikely(entry->prev->next != entry)) {
95746+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95747+ "but was %p\n", entry, prev->next);
95748+ BUG();
95749+ return false;
95750+ }
95751+ if (unlikely(entry->next->prev != entry)) {
95752+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95753+ "but was %p\n", entry, next->prev);
95754+ BUG();
95755+ return false;
95756+ }
95757+ return true;
95758+}
95759+
95760+void __list_del_entry(struct list_head *entry)
95761+{
95762+ if (!__list_del_entry_debug(entry))
95763 return;
95764
95765- __list_del(prev, next);
95766+ __list_del(entry->prev, entry->next);
95767 }
95768 EXPORT_SYMBOL(__list_del_entry);
95769
95770@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95771 void __list_add_rcu(struct list_head *new,
95772 struct list_head *prev, struct list_head *next)
95773 {
95774- WARN(next->prev != prev,
95775- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95776- prev, next->prev, next);
95777- WARN(prev->next != next,
95778- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95779- next, prev->next, prev);
95780+ if (!__list_add_debug(new, prev, next))
95781+ return;
95782+
95783 new->next = next;
95784 new->prev = prev;
95785 rcu_assign_pointer(list_next_rcu(prev), new);
95786 next->prev = new;
95787 }
95788 EXPORT_SYMBOL(__list_add_rcu);
95789+#endif
95790+
95791+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95792+{
95793+#ifdef CONFIG_DEBUG_LIST
95794+ if (!__list_add_debug(new, prev, next))
95795+ return;
95796+#endif
95797+
95798+ pax_open_kernel();
95799+ next->prev = new;
95800+ new->next = next;
95801+ new->prev = prev;
95802+ prev->next = new;
95803+ pax_close_kernel();
95804+}
95805+EXPORT_SYMBOL(__pax_list_add);
95806+
95807+void pax_list_del(struct list_head *entry)
95808+{
95809+#ifdef CONFIG_DEBUG_LIST
95810+ if (!__list_del_entry_debug(entry))
95811+ return;
95812+#endif
95813+
95814+ pax_open_kernel();
95815+ __list_del(entry->prev, entry->next);
95816+ entry->next = LIST_POISON1;
95817+ entry->prev = LIST_POISON2;
95818+ pax_close_kernel();
95819+}
95820+EXPORT_SYMBOL(pax_list_del);
95821+
95822+void pax_list_del_init(struct list_head *entry)
95823+{
95824+ pax_open_kernel();
95825+ __list_del(entry->prev, entry->next);
95826+ INIT_LIST_HEAD(entry);
95827+ pax_close_kernel();
95828+}
95829+EXPORT_SYMBOL(pax_list_del_init);
95830+
95831+void __pax_list_add_rcu(struct list_head *new,
95832+ struct list_head *prev, struct list_head *next)
95833+{
95834+#ifdef CONFIG_DEBUG_LIST
95835+ if (!__list_add_debug(new, prev, next))
95836+ return;
95837+#endif
95838+
95839+ pax_open_kernel();
95840+ new->next = next;
95841+ new->prev = prev;
95842+ rcu_assign_pointer(list_next_rcu(prev), new);
95843+ next->prev = new;
95844+ pax_close_kernel();
95845+}
95846+EXPORT_SYMBOL(__pax_list_add_rcu);
95847+
95848+void pax_list_del_rcu(struct list_head *entry)
95849+{
95850+#ifdef CONFIG_DEBUG_LIST
95851+ if (!__list_del_entry_debug(entry))
95852+ return;
95853+#endif
95854+
95855+ pax_open_kernel();
95856+ __list_del(entry->prev, entry->next);
95857+ entry->next = LIST_POISON1;
95858+ entry->prev = LIST_POISON2;
95859+ pax_close_kernel();
95860+}
95861+EXPORT_SYMBOL(pax_list_del_rcu);
95862diff --git a/lib/lockref.c b/lib/lockref.c
95863index d2233de..fa1a2f6 100644
95864--- a/lib/lockref.c
95865+++ b/lib/lockref.c
95866@@ -48,13 +48,13 @@
95867 void lockref_get(struct lockref *lockref)
95868 {
95869 CMPXCHG_LOOP(
95870- new.count++;
95871+ __lockref_inc(&new);
95872 ,
95873 return;
95874 );
95875
95876 spin_lock(&lockref->lock);
95877- lockref->count++;
95878+ __lockref_inc(lockref);
95879 spin_unlock(&lockref->lock);
95880 }
95881 EXPORT_SYMBOL(lockref_get);
95882@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95883 int retval;
95884
95885 CMPXCHG_LOOP(
95886- new.count++;
95887+ __lockref_inc(&new);
95888 if (!old.count)
95889 return 0;
95890 ,
95891@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95892 spin_lock(&lockref->lock);
95893 retval = 0;
95894 if (lockref->count) {
95895- lockref->count++;
95896+ __lockref_inc(lockref);
95897 retval = 1;
95898 }
95899 spin_unlock(&lockref->lock);
95900@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95901 int lockref_get_or_lock(struct lockref *lockref)
95902 {
95903 CMPXCHG_LOOP(
95904- new.count++;
95905+ __lockref_inc(&new);
95906 if (!old.count)
95907 break;
95908 ,
95909@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95910 spin_lock(&lockref->lock);
95911 if (!lockref->count)
95912 return 0;
95913- lockref->count++;
95914+ __lockref_inc(lockref);
95915 spin_unlock(&lockref->lock);
95916 return 1;
95917 }
95918@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95919 int lockref_put_or_lock(struct lockref *lockref)
95920 {
95921 CMPXCHG_LOOP(
95922- new.count--;
95923+ __lockref_dec(&new);
95924 if (old.count <= 1)
95925 break;
95926 ,
95927@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95928 spin_lock(&lockref->lock);
95929 if (lockref->count <= 1)
95930 return 0;
95931- lockref->count--;
95932+ __lockref_dec(lockref);
95933 spin_unlock(&lockref->lock);
95934 return 1;
95935 }
95936@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95937 int retval;
95938
95939 CMPXCHG_LOOP(
95940- new.count++;
95941+ __lockref_inc(&new);
95942 if ((int)old.count < 0)
95943 return 0;
95944 ,
95945@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95946 spin_lock(&lockref->lock);
95947 retval = 0;
95948 if ((int) lockref->count >= 0) {
95949- lockref->count++;
95950+ __lockref_inc(lockref);
95951 retval = 1;
95952 }
95953 spin_unlock(&lockref->lock);
95954diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95955index a89cf09..1a42c2d 100644
95956--- a/lib/percpu-refcount.c
95957+++ b/lib/percpu-refcount.c
95958@@ -29,7 +29,7 @@
95959 * can't hit 0 before we've added up all the percpu refs.
95960 */
95961
95962-#define PCPU_COUNT_BIAS (1U << 31)
95963+#define PCPU_COUNT_BIAS (1U << 30)
95964
95965 static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
95966 {
95967diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95968index 3291a8e..346a91e 100644
95969--- a/lib/radix-tree.c
95970+++ b/lib/radix-tree.c
95971@@ -67,7 +67,7 @@ struct radix_tree_preload {
95972 int nr;
95973 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95974 };
95975-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95976+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95977
95978 static inline void *ptr_to_indirect(void *ptr)
95979 {
95980diff --git a/lib/random32.c b/lib/random32.c
95981index c9b6bf3..4752c6d4 100644
95982--- a/lib/random32.c
95983+++ b/lib/random32.c
95984@@ -46,7 +46,7 @@ static inline void prandom_state_selftest(void)
95985 }
95986 #endif
95987
95988-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95989+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95990
95991 /**
95992 * prandom_u32_state - seeded pseudo-random number generator.
95993diff --git a/lib/rbtree.c b/lib/rbtree.c
95994index c16c81a..4dcbda1 100644
95995--- a/lib/rbtree.c
95996+++ b/lib/rbtree.c
95997@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95998 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95999
96000 static const struct rb_augment_callbacks dummy_callbacks = {
96001- dummy_propagate, dummy_copy, dummy_rotate
96002+ .propagate = dummy_propagate,
96003+ .copy = dummy_copy,
96004+ .rotate = dummy_rotate
96005 };
96006
96007 void rb_insert_color(struct rb_node *node, struct rb_root *root)
96008diff --git a/lib/show_mem.c b/lib/show_mem.c
96009index 0922579..9d7adb9 100644
96010--- a/lib/show_mem.c
96011+++ b/lib/show_mem.c
96012@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
96013 quicklist_total_size());
96014 #endif
96015 #ifdef CONFIG_MEMORY_FAILURE
96016- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
96017+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
96018 #endif
96019 }
96020diff --git a/lib/string.c b/lib/string.c
96021index f3c6ff5..70db57a 100644
96022--- a/lib/string.c
96023+++ b/lib/string.c
96024@@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
96025 EXPORT_SYMBOL(memset);
96026 #endif
96027
96028+/**
96029+ * memzero_explicit - Fill a region of memory (e.g. sensitive
96030+ * keying data) with 0s.
96031+ * @s: Pointer to the start of the area.
96032+ * @count: The size of the area.
96033+ *
96034+ * memzero_explicit() doesn't need an arch-specific version as
96035+ * it just invokes the one of memset() implicitly.
96036+ */
96037+void memzero_explicit(void *s, size_t count)
96038+{
96039+ memset(s, 0, count);
96040+ OPTIMIZER_HIDE_VAR(s);
96041+}
96042+EXPORT_SYMBOL(memzero_explicit);
96043+
96044 #ifndef __HAVE_ARCH_MEMCPY
96045 /**
96046 * memcpy - Copy one area of memory to another
96047diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
96048index bb2b201..46abaf9 100644
96049--- a/lib/strncpy_from_user.c
96050+++ b/lib/strncpy_from_user.c
96051@@ -21,7 +21,7 @@
96052 */
96053 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
96054 {
96055- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96056+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96057 long res = 0;
96058
96059 /*
96060diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
96061index a28df52..3d55877 100644
96062--- a/lib/strnlen_user.c
96063+++ b/lib/strnlen_user.c
96064@@ -26,7 +26,7 @@
96065 */
96066 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
96067 {
96068- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96069+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96070 long align, res = 0;
96071 unsigned long c;
96072
96073diff --git a/lib/swiotlb.c b/lib/swiotlb.c
96074index 4abda07..b9d3765 100644
96075--- a/lib/swiotlb.c
96076+++ b/lib/swiotlb.c
96077@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
96078
96079 void
96080 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
96081- dma_addr_t dev_addr)
96082+ dma_addr_t dev_addr, struct dma_attrs *attrs)
96083 {
96084 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
96085
96086diff --git a/lib/test_bpf.c b/lib/test_bpf.c
96087index 89e0345..3347efe 100644
96088--- a/lib/test_bpf.c
96089+++ b/lib/test_bpf.c
96090@@ -1798,7 +1798,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
96091 break;
96092
96093 case INTERNAL:
96094- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
96095+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
96096 if (fp == NULL) {
96097 pr_cont("UNEXPECTED_FAIL no memory left\n");
96098 *err = -ENOMEM;
96099diff --git a/lib/usercopy.c b/lib/usercopy.c
96100index 4f5b1dd..7cab418 100644
96101--- a/lib/usercopy.c
96102+++ b/lib/usercopy.c
96103@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
96104 WARN(1, "Buffer overflow detected!\n");
96105 }
96106 EXPORT_SYMBOL(copy_from_user_overflow);
96107+
96108+void copy_to_user_overflow(void)
96109+{
96110+ WARN(1, "Buffer overflow detected!\n");
96111+}
96112+EXPORT_SYMBOL(copy_to_user_overflow);
96113diff --git a/lib/vsprintf.c b/lib/vsprintf.c
96114index 6fe2c84..2fe5ec6 100644
96115--- a/lib/vsprintf.c
96116+++ b/lib/vsprintf.c
96117@@ -16,6 +16,9 @@
96118 * - scnprintf and vscnprintf
96119 */
96120
96121+#ifdef CONFIG_GRKERNSEC_HIDESYM
96122+#define __INCLUDED_BY_HIDESYM 1
96123+#endif
96124 #include <stdarg.h>
96125 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
96126 #include <linux/types.h>
96127@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
96128 #ifdef CONFIG_KALLSYMS
96129 if (*fmt == 'B')
96130 sprint_backtrace(sym, value);
96131- else if (*fmt != 'f' && *fmt != 's')
96132+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
96133 sprint_symbol(sym, value);
96134 else
96135 sprint_symbol_no_offset(sym, value);
96136@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
96137 return number(buf, end, num, spec);
96138 }
96139
96140+#ifdef CONFIG_GRKERNSEC_HIDESYM
96141+int kptr_restrict __read_mostly = 2;
96142+#else
96143 int kptr_restrict __read_mostly;
96144+#endif
96145
96146 /*
96147 * Show a '%p' thing. A kernel extension is that the '%p' is followed
96148@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
96149 *
96150 * - 'F' For symbolic function descriptor pointers with offset
96151 * - 'f' For simple symbolic function names without offset
96152+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
96153 * - 'S' For symbolic direct pointers with offset
96154 * - 's' For symbolic direct pointers without offset
96155+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
96156 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
96157 * - 'B' For backtraced symbolic direct pointers with offset
96158 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
96159@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96160
96161 if (!ptr && *fmt != 'K') {
96162 /*
96163- * Print (null) with the same width as a pointer so it makes
96164+ * Print (nil) with the same width as a pointer so it makes
96165 * tabular output look nice.
96166 */
96167 if (spec.field_width == -1)
96168 spec.field_width = default_width;
96169- return string(buf, end, "(null)", spec);
96170+ return string(buf, end, "(nil)", spec);
96171 }
96172
96173 switch (*fmt) {
96174@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96175 /* Fallthrough */
96176 case 'S':
96177 case 's':
96178+#ifdef CONFIG_GRKERNSEC_HIDESYM
96179+ break;
96180+#else
96181+ return symbol_string(buf, end, ptr, spec, fmt);
96182+#endif
96183+ case 'X':
96184+ ptr = dereference_function_descriptor(ptr);
96185+ case 'A':
96186 case 'B':
96187 return symbol_string(buf, end, ptr, spec, fmt);
96188 case 'R':
96189@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96190 va_end(va);
96191 return buf;
96192 }
96193+ case 'P':
96194+ break;
96195 case 'K':
96196 /*
96197 * %pK cannot be used in IRQ context because its test
96198@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96199 ((const struct file *)ptr)->f_path.dentry,
96200 spec, fmt);
96201 }
96202+
96203+#ifdef CONFIG_GRKERNSEC_HIDESYM
96204+ /* 'P' = approved pointers to copy to userland,
96205+ as in the /proc/kallsyms case, as we make it display nothing
96206+ for non-root users, and the real contents for root users
96207+ 'X' = approved simple symbols
96208+ Also ignore 'K' pointers, since we force their NULLing for non-root users
96209+ above
96210+ */
96211+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
96212+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
96213+ dump_stack();
96214+ ptr = NULL;
96215+ }
96216+#endif
96217+
96218 spec.flags |= SMALL;
96219 if (spec.field_width == -1) {
96220 spec.field_width = default_width;
96221@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96222 typeof(type) value; \
96223 if (sizeof(type) == 8) { \
96224 args = PTR_ALIGN(args, sizeof(u32)); \
96225- *(u32 *)&value = *(u32 *)args; \
96226- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
96227+ *(u32 *)&value = *(const u32 *)args; \
96228+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
96229 } else { \
96230 args = PTR_ALIGN(args, sizeof(type)); \
96231- value = *(typeof(type) *)args; \
96232+ value = *(const typeof(type) *)args; \
96233 } \
96234 args += sizeof(type); \
96235 value; \
96236@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96237 case FORMAT_TYPE_STR: {
96238 const char *str_arg = args;
96239 args += strlen(str_arg) + 1;
96240- str = string(str, end, (char *)str_arg, spec);
96241+ str = string(str, end, str_arg, spec);
96242 break;
96243 }
96244
96245diff --git a/localversion-grsec b/localversion-grsec
96246new file mode 100644
96247index 0000000..7cd6065
96248--- /dev/null
96249+++ b/localversion-grsec
96250@@ -0,0 +1 @@
96251+-grsec
96252diff --git a/mm/Kconfig b/mm/Kconfig
96253index 886db21..f514de2 100644
96254--- a/mm/Kconfig
96255+++ b/mm/Kconfig
96256@@ -333,10 +333,11 @@ config KSM
96257 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
96258
96259 config DEFAULT_MMAP_MIN_ADDR
96260- int "Low address space to protect from user allocation"
96261+ int "Low address space to protect from user allocation"
96262 depends on MMU
96263- default 4096
96264- help
96265+ default 32768 if ALPHA || ARM || PARISC || SPARC32
96266+ default 65536
96267+ help
96268 This is the portion of low virtual memory which should be protected
96269 from userspace allocation. Keeping a user from writing to low pages
96270 can help reduce the impact of kernel NULL pointer bugs.
96271@@ -367,7 +368,7 @@ config MEMORY_FAILURE
96272
96273 config HWPOISON_INJECT
96274 tristate "HWPoison pages injector"
96275- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
96276+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
96277 select PROC_PAGE_MONITOR
96278
96279 config NOMMU_INITIAL_TRIM_EXCESS
96280diff --git a/mm/backing-dev.c b/mm/backing-dev.c
96281index 1706cbb..f89dbca 100644
96282--- a/mm/backing-dev.c
96283+++ b/mm/backing-dev.c
96284@@ -12,7 +12,7 @@
96285 #include <linux/device.h>
96286 #include <trace/events/writeback.h>
96287
96288-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
96289+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
96290
96291 struct backing_dev_info default_backing_dev_info = {
96292 .name = "default",
96293@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
96294 return err;
96295
96296 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
96297- atomic_long_inc_return(&bdi_seq));
96298+ atomic_long_inc_return_unchecked(&bdi_seq));
96299 if (err) {
96300 bdi_destroy(bdi);
96301 return err;
96302diff --git a/mm/filemap.c b/mm/filemap.c
96303index 90effcd..539aa64 100644
96304--- a/mm/filemap.c
96305+++ b/mm/filemap.c
96306@@ -2092,7 +2092,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
96307 struct address_space *mapping = file->f_mapping;
96308
96309 if (!mapping->a_ops->readpage)
96310- return -ENOEXEC;
96311+ return -ENODEV;
96312 file_accessed(file);
96313 vma->vm_ops = &generic_file_vm_ops;
96314 return 0;
96315@@ -2270,6 +2270,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
96316 *pos = i_size_read(inode);
96317
96318 if (limit != RLIM_INFINITY) {
96319+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
96320 if (*pos >= limit) {
96321 send_sig(SIGXFSZ, current, 0);
96322 return -EFBIG;
96323diff --git a/mm/fremap.c b/mm/fremap.c
96324index 72b8fa3..c5b39f1 100644
96325--- a/mm/fremap.c
96326+++ b/mm/fremap.c
96327@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
96328 retry:
96329 vma = find_vma(mm, start);
96330
96331+#ifdef CONFIG_PAX_SEGMEXEC
96332+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
96333+ goto out;
96334+#endif
96335+
96336 /*
96337 * Make sure the vma is shared, that it supports prefaulting,
96338 * and that the remapped range is valid and fully within
96339diff --git a/mm/gup.c b/mm/gup.c
96340index 91d044b..a58ecf6 100644
96341--- a/mm/gup.c
96342+++ b/mm/gup.c
96343@@ -270,11 +270,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
96344 unsigned int fault_flags = 0;
96345 int ret;
96346
96347- /* For mlock, just skip the stack guard page. */
96348- if ((*flags & FOLL_MLOCK) &&
96349- (stack_guard_page_start(vma, address) ||
96350- stack_guard_page_end(vma, address + PAGE_SIZE)))
96351- return -ENOENT;
96352 if (*flags & FOLL_WRITE)
96353 fault_flags |= FAULT_FLAG_WRITE;
96354 if (nonblocking)
96355@@ -436,14 +431,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96356 if (!(gup_flags & FOLL_FORCE))
96357 gup_flags |= FOLL_NUMA;
96358
96359- do {
96360+ while (nr_pages) {
96361 struct page *page;
96362 unsigned int foll_flags = gup_flags;
96363 unsigned int page_increm;
96364
96365 /* first iteration or cross vma bound */
96366 if (!vma || start >= vma->vm_end) {
96367- vma = find_extend_vma(mm, start);
96368+ vma = find_vma(mm, start);
96369 if (!vma && in_gate_area(mm, start)) {
96370 int ret;
96371 ret = get_gate_page(mm, start & PAGE_MASK,
96372@@ -455,7 +450,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96373 goto next_page;
96374 }
96375
96376- if (!vma || check_vma_flags(vma, gup_flags))
96377+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
96378 return i ? : -EFAULT;
96379 if (is_vm_hugetlb_page(vma)) {
96380 i = follow_hugetlb_page(mm, vma, pages, vmas,
96381@@ -510,7 +505,7 @@ next_page:
96382 i += page_increm;
96383 start += page_increm * PAGE_SIZE;
96384 nr_pages -= page_increm;
96385- } while (nr_pages);
96386+ }
96387 return i;
96388 }
96389 EXPORT_SYMBOL(__get_user_pages);
96390diff --git a/mm/highmem.c b/mm/highmem.c
96391index 123bcd3..0de52ba 100644
96392--- a/mm/highmem.c
96393+++ b/mm/highmem.c
96394@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
96395 * So no dangers, even with speculative execution.
96396 */
96397 page = pte_page(pkmap_page_table[i]);
96398+ pax_open_kernel();
96399 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
96400-
96401+ pax_close_kernel();
96402 set_page_address(page, NULL);
96403 need_flush = 1;
96404 }
96405@@ -259,9 +260,11 @@ start:
96406 }
96407 }
96408 vaddr = PKMAP_ADDR(last_pkmap_nr);
96409+
96410+ pax_open_kernel();
96411 set_pte_at(&init_mm, vaddr,
96412 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
96413-
96414+ pax_close_kernel();
96415 pkmap_count[last_pkmap_nr] = 1;
96416 set_page_address(page, (void *)vaddr);
96417
96418diff --git a/mm/hugetlb.c b/mm/hugetlb.c
96419index eeceeeb..a209d58 100644
96420--- a/mm/hugetlb.c
96421+++ b/mm/hugetlb.c
96422@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96423 struct ctl_table *table, int write,
96424 void __user *buffer, size_t *length, loff_t *ppos)
96425 {
96426+ ctl_table_no_const t;
96427 struct hstate *h = &default_hstate;
96428 unsigned long tmp = h->max_huge_pages;
96429 int ret;
96430@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96431 if (!hugepages_supported())
96432 return -ENOTSUPP;
96433
96434- table->data = &tmp;
96435- table->maxlen = sizeof(unsigned long);
96436- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96437+ t = *table;
96438+ t.data = &tmp;
96439+ t.maxlen = sizeof(unsigned long);
96440+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
96441 if (ret)
96442 goto out;
96443
96444@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96445 struct hstate *h = &default_hstate;
96446 unsigned long tmp;
96447 int ret;
96448+ ctl_table_no_const hugetlb_table;
96449
96450 if (!hugepages_supported())
96451 return -ENOTSUPP;
96452@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96453 if (write && hstate_is_gigantic(h))
96454 return -EINVAL;
96455
96456- table->data = &tmp;
96457- table->maxlen = sizeof(unsigned long);
96458- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96459+ hugetlb_table = *table;
96460+ hugetlb_table.data = &tmp;
96461+ hugetlb_table.maxlen = sizeof(unsigned long);
96462+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
96463 if (ret)
96464 goto out;
96465
96466@@ -2792,6 +2796,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
96467 mutex_unlock(&mapping->i_mmap_mutex);
96468 }
96469
96470+#ifdef CONFIG_PAX_SEGMEXEC
96471+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
96472+{
96473+ struct mm_struct *mm = vma->vm_mm;
96474+ struct vm_area_struct *vma_m;
96475+ unsigned long address_m;
96476+ pte_t *ptep_m;
96477+
96478+ vma_m = pax_find_mirror_vma(vma);
96479+ if (!vma_m)
96480+ return;
96481+
96482+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96483+ address_m = address + SEGMEXEC_TASK_SIZE;
96484+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
96485+ get_page(page_m);
96486+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
96487+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
96488+}
96489+#endif
96490+
96491 /*
96492 * Hugetlb_cow() should be called with page lock of the original hugepage held.
96493 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
96494@@ -2903,6 +2928,11 @@ retry_avoidcopy:
96495 make_huge_pte(vma, new_page, 1));
96496 page_remove_rmap(old_page);
96497 hugepage_add_new_anon_rmap(new_page, vma, address);
96498+
96499+#ifdef CONFIG_PAX_SEGMEXEC
96500+ pax_mirror_huge_pte(vma, address, new_page);
96501+#endif
96502+
96503 /* Make the old page be freed below */
96504 new_page = old_page;
96505 }
96506@@ -3063,6 +3093,10 @@ retry:
96507 && (vma->vm_flags & VM_SHARED)));
96508 set_huge_pte_at(mm, address, ptep, new_pte);
96509
96510+#ifdef CONFIG_PAX_SEGMEXEC
96511+ pax_mirror_huge_pte(vma, address, page);
96512+#endif
96513+
96514 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
96515 /* Optimization, do the COW without a second fault */
96516 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
96517@@ -3129,6 +3163,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96518 struct hstate *h = hstate_vma(vma);
96519 struct address_space *mapping;
96520
96521+#ifdef CONFIG_PAX_SEGMEXEC
96522+ struct vm_area_struct *vma_m;
96523+#endif
96524+
96525 address &= huge_page_mask(h);
96526
96527 ptep = huge_pte_offset(mm, address);
96528@@ -3142,6 +3180,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96529 VM_FAULT_SET_HINDEX(hstate_index(h));
96530 }
96531
96532+#ifdef CONFIG_PAX_SEGMEXEC
96533+ vma_m = pax_find_mirror_vma(vma);
96534+ if (vma_m) {
96535+ unsigned long address_m;
96536+
96537+ if (vma->vm_start > vma_m->vm_start) {
96538+ address_m = address;
96539+ address -= SEGMEXEC_TASK_SIZE;
96540+ vma = vma_m;
96541+ h = hstate_vma(vma);
96542+ } else
96543+ address_m = address + SEGMEXEC_TASK_SIZE;
96544+
96545+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
96546+ return VM_FAULT_OOM;
96547+ address_m &= HPAGE_MASK;
96548+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
96549+ }
96550+#endif
96551+
96552 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
96553 if (!ptep)
96554 return VM_FAULT_OOM;
96555diff --git a/mm/internal.h b/mm/internal.h
96556index a1b651b..f688570 100644
96557--- a/mm/internal.h
96558+++ b/mm/internal.h
96559@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
96560 * in mm/page_alloc.c
96561 */
96562 extern void __free_pages_bootmem(struct page *page, unsigned int order);
96563+extern void free_compound_page(struct page *page);
96564 extern void prep_compound_page(struct page *page, unsigned long order);
96565 #ifdef CONFIG_MEMORY_FAILURE
96566 extern bool is_free_buddy_page(struct page *page);
96567@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
96568
96569 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
96570 unsigned long, unsigned long,
96571- unsigned long, unsigned long);
96572+ unsigned long, unsigned long) __intentional_overflow(-1);
96573
96574 extern void set_pageblock_order(void);
96575 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
96576diff --git a/mm/iov_iter.c b/mm/iov_iter.c
96577index 9a09f20..6ef0515 100644
96578--- a/mm/iov_iter.c
96579+++ b/mm/iov_iter.c
96580@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
96581
96582 while (bytes) {
96583 char __user *buf = iov->iov_base + base;
96584- int copy = min(bytes, iov->iov_len - base);
96585+ size_t copy = min(bytes, iov->iov_len - base);
96586
96587 base = 0;
96588 left = __copy_from_user_inatomic(vaddr, buf, copy);
96589@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
96590
96591 kaddr = kmap_atomic(page);
96592 if (likely(i->nr_segs == 1)) {
96593- int left;
96594+ size_t left;
96595 char __user *buf = i->iov->iov_base + i->iov_offset;
96596 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
96597 copied = bytes - left;
96598@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
96599 * zero-length segments (without overruning the iovec).
96600 */
96601 while (bytes || unlikely(i->count && !iov->iov_len)) {
96602- int copy;
96603+ size_t copy;
96604
96605 copy = min(bytes, iov->iov_len - base);
96606 BUG_ON(!i->count || i->count < copy);
96607diff --git a/mm/kmemleak.c b/mm/kmemleak.c
96608index 3cda50c..032ba634 100644
96609--- a/mm/kmemleak.c
96610+++ b/mm/kmemleak.c
96611@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
96612
96613 for (i = 0; i < object->trace_len; i++) {
96614 void *ptr = (void *)object->trace[i];
96615- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
96616+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
96617 }
96618 }
96619
96620@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
96621 return -ENOMEM;
96622 }
96623
96624- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
96625+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
96626 &kmemleak_fops);
96627 if (!dentry)
96628 pr_warning("Failed to create the debugfs kmemleak file\n");
96629diff --git a/mm/maccess.c b/mm/maccess.c
96630index d53adf9..03a24bf 100644
96631--- a/mm/maccess.c
96632+++ b/mm/maccess.c
96633@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
96634 set_fs(KERNEL_DS);
96635 pagefault_disable();
96636 ret = __copy_from_user_inatomic(dst,
96637- (__force const void __user *)src, size);
96638+ (const void __force_user *)src, size);
96639 pagefault_enable();
96640 set_fs(old_fs);
96641
96642@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
96643
96644 set_fs(KERNEL_DS);
96645 pagefault_disable();
96646- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96647+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96648 pagefault_enable();
96649 set_fs(old_fs);
96650
96651diff --git a/mm/madvise.c b/mm/madvise.c
96652index 0938b30..199abe8 100644
96653--- a/mm/madvise.c
96654+++ b/mm/madvise.c
96655@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
96656 pgoff_t pgoff;
96657 unsigned long new_flags = vma->vm_flags;
96658
96659+#ifdef CONFIG_PAX_SEGMEXEC
96660+ struct vm_area_struct *vma_m;
96661+#endif
96662+
96663 switch (behavior) {
96664 case MADV_NORMAL:
96665 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96666@@ -126,6 +130,13 @@ success:
96667 /*
96668 * vm_flags is protected by the mmap_sem held in write mode.
96669 */
96670+
96671+#ifdef CONFIG_PAX_SEGMEXEC
96672+ vma_m = pax_find_mirror_vma(vma);
96673+ if (vma_m)
96674+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96675+#endif
96676+
96677 vma->vm_flags = new_flags;
96678
96679 out:
96680@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96681 struct vm_area_struct **prev,
96682 unsigned long start, unsigned long end)
96683 {
96684+
96685+#ifdef CONFIG_PAX_SEGMEXEC
96686+ struct vm_area_struct *vma_m;
96687+#endif
96688+
96689 *prev = vma;
96690 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96691 return -EINVAL;
96692@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96693 zap_page_range(vma, start, end - start, &details);
96694 } else
96695 zap_page_range(vma, start, end - start, NULL);
96696+
96697+#ifdef CONFIG_PAX_SEGMEXEC
96698+ vma_m = pax_find_mirror_vma(vma);
96699+ if (vma_m) {
96700+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
96701+ struct zap_details details = {
96702+ .nonlinear_vma = vma_m,
96703+ .last_index = ULONG_MAX,
96704+ };
96705+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
96706+ } else
96707+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96708+ }
96709+#endif
96710+
96711 return 0;
96712 }
96713
96714@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96715 if (end < start)
96716 return error;
96717
96718+#ifdef CONFIG_PAX_SEGMEXEC
96719+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96720+ if (end > SEGMEXEC_TASK_SIZE)
96721+ return error;
96722+ } else
96723+#endif
96724+
96725+ if (end > TASK_SIZE)
96726+ return error;
96727+
96728 error = 0;
96729 if (end == start)
96730 return error;
96731diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96732index 44c6bd2..60369dc3 100644
96733--- a/mm/memory-failure.c
96734+++ b/mm/memory-failure.c
96735@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96736
96737 int sysctl_memory_failure_recovery __read_mostly = 1;
96738
96739-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96740+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96741
96742 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96743
96744@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96745 pfn, t->comm, t->pid);
96746 si.si_signo = SIGBUS;
96747 si.si_errno = 0;
96748- si.si_addr = (void *)addr;
96749+ si.si_addr = (void __user *)addr;
96750 #ifdef __ARCH_SI_TRAPNO
96751 si.si_trapno = trapno;
96752 #endif
96753@@ -791,7 +791,7 @@ static struct page_state {
96754 unsigned long res;
96755 char *msg;
96756 int (*action)(struct page *p, unsigned long pfn);
96757-} error_states[] = {
96758+} __do_const error_states[] = {
96759 { reserved, reserved, "reserved kernel", me_kernel },
96760 /*
96761 * free pages are specially detected outside this table:
96762@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96763 nr_pages = 1 << compound_order(hpage);
96764 else /* normal page or thp */
96765 nr_pages = 1;
96766- atomic_long_add(nr_pages, &num_poisoned_pages);
96767+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96768
96769 /*
96770 * We need/can do nothing about count=0 pages.
96771@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96772 if (PageHWPoison(hpage)) {
96773 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96774 || (p != hpage && TestSetPageHWPoison(hpage))) {
96775- atomic_long_sub(nr_pages, &num_poisoned_pages);
96776+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96777 unlock_page(hpage);
96778 return 0;
96779 }
96780@@ -1196,14 +1196,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96781 */
96782 if (!PageHWPoison(p)) {
96783 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96784- atomic_long_sub(nr_pages, &num_poisoned_pages);
96785+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96786 put_page(hpage);
96787 res = 0;
96788 goto out;
96789 }
96790 if (hwpoison_filter(p)) {
96791 if (TestClearPageHWPoison(p))
96792- atomic_long_sub(nr_pages, &num_poisoned_pages);
96793+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96794 unlock_page(hpage);
96795 put_page(hpage);
96796 return 0;
96797@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn)
96798 return 0;
96799 }
96800 if (TestClearPageHWPoison(p))
96801- atomic_long_dec(&num_poisoned_pages);
96802+ atomic_long_dec_unchecked(&num_poisoned_pages);
96803 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96804 return 0;
96805 }
96806@@ -1447,7 +1447,7 @@ int unpoison_memory(unsigned long pfn)
96807 */
96808 if (TestClearPageHWPoison(page)) {
96809 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96810- atomic_long_sub(nr_pages, &num_poisoned_pages);
96811+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96812 freeit = 1;
96813 if (PageHuge(page))
96814 clear_page_hwpoison_huge_page(page);
96815@@ -1572,11 +1572,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96816 if (PageHuge(page)) {
96817 set_page_hwpoison_huge_page(hpage);
96818 dequeue_hwpoisoned_huge_page(hpage);
96819- atomic_long_add(1 << compound_order(hpage),
96820+ atomic_long_add_unchecked(1 << compound_order(hpage),
96821 &num_poisoned_pages);
96822 } else {
96823 SetPageHWPoison(page);
96824- atomic_long_inc(&num_poisoned_pages);
96825+ atomic_long_inc_unchecked(&num_poisoned_pages);
96826 }
96827 }
96828 return ret;
96829@@ -1615,7 +1615,7 @@ static int __soft_offline_page(struct page *page, int flags)
96830 put_page(page);
96831 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96832 SetPageHWPoison(page);
96833- atomic_long_inc(&num_poisoned_pages);
96834+ atomic_long_inc_unchecked(&num_poisoned_pages);
96835 return 0;
96836 }
96837
96838@@ -1666,7 +1666,7 @@ static int __soft_offline_page(struct page *page, int flags)
96839 if (!is_free_buddy_page(page))
96840 pr_info("soft offline: %#lx: page leaked\n",
96841 pfn);
96842- atomic_long_inc(&num_poisoned_pages);
96843+ atomic_long_inc_unchecked(&num_poisoned_pages);
96844 }
96845 } else {
96846 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96847@@ -1736,11 +1736,11 @@ int soft_offline_page(struct page *page, int flags)
96848 if (PageHuge(page)) {
96849 set_page_hwpoison_huge_page(hpage);
96850 dequeue_hwpoisoned_huge_page(hpage);
96851- atomic_long_add(1 << compound_order(hpage),
96852+ atomic_long_add_unchecked(1 << compound_order(hpage),
96853 &num_poisoned_pages);
96854 } else {
96855 SetPageHWPoison(page);
96856- atomic_long_inc(&num_poisoned_pages);
96857+ atomic_long_inc_unchecked(&num_poisoned_pages);
96858 }
96859 }
96860 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
96861diff --git a/mm/memory.c b/mm/memory.c
96862index e229970..68218aa 100644
96863--- a/mm/memory.c
96864+++ b/mm/memory.c
96865@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96866 free_pte_range(tlb, pmd, addr);
96867 } while (pmd++, addr = next, addr != end);
96868
96869+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96870 start &= PUD_MASK;
96871 if (start < floor)
96872 return;
96873@@ -429,6 +430,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96874 pmd = pmd_offset(pud, start);
96875 pud_clear(pud);
96876 pmd_free_tlb(tlb, pmd, start);
96877+#endif
96878+
96879 }
96880
96881 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96882@@ -448,6 +451,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96883 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96884 } while (pud++, addr = next, addr != end);
96885
96886+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96887 start &= PGDIR_MASK;
96888 if (start < floor)
96889 return;
96890@@ -462,6 +466,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96891 pud = pud_offset(pgd, start);
96892 pgd_clear(pgd);
96893 pud_free_tlb(tlb, pud, start);
96894+#endif
96895+
96896 }
96897
96898 /*
96899@@ -691,10 +697,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96900 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96901 */
96902 if (vma->vm_ops)
96903- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96904+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96905 vma->vm_ops->fault);
96906 if (vma->vm_file)
96907- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96908+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96909 vma->vm_file->f_op->mmap);
96910 dump_stack();
96911 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96912@@ -1147,6 +1153,7 @@ again:
96913 print_bad_pte(vma, addr, ptent, page);
96914 if (unlikely(!__tlb_remove_page(tlb, page))) {
96915 force_flush = 1;
96916+ addr += PAGE_SIZE;
96917 break;
96918 }
96919 continue;
96920@@ -1500,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96921 page_add_file_rmap(page);
96922 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96923
96924+#ifdef CONFIG_PAX_SEGMEXEC
96925+ pax_mirror_file_pte(vma, addr, page, ptl);
96926+#endif
96927+
96928 retval = 0;
96929 pte_unmap_unlock(pte, ptl);
96930 return retval;
96931@@ -1544,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96932 if (!page_count(page))
96933 return -EINVAL;
96934 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96935+
96936+#ifdef CONFIG_PAX_SEGMEXEC
96937+ struct vm_area_struct *vma_m;
96938+#endif
96939+
96940 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96941 BUG_ON(vma->vm_flags & VM_PFNMAP);
96942 vma->vm_flags |= VM_MIXEDMAP;
96943+
96944+#ifdef CONFIG_PAX_SEGMEXEC
96945+ vma_m = pax_find_mirror_vma(vma);
96946+ if (vma_m)
96947+ vma_m->vm_flags |= VM_MIXEDMAP;
96948+#endif
96949+
96950 }
96951 return insert_page(vma, addr, page, vma->vm_page_prot);
96952 }
96953@@ -1629,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96954 unsigned long pfn)
96955 {
96956 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96957+ BUG_ON(vma->vm_mirror);
96958
96959 if (addr < vma->vm_start || addr >= vma->vm_end)
96960 return -EFAULT;
96961@@ -1876,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96962
96963 BUG_ON(pud_huge(*pud));
96964
96965- pmd = pmd_alloc(mm, pud, addr);
96966+ pmd = (mm == &init_mm) ?
96967+ pmd_alloc_kernel(mm, pud, addr) :
96968+ pmd_alloc(mm, pud, addr);
96969 if (!pmd)
96970 return -ENOMEM;
96971 do {
96972@@ -1896,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96973 unsigned long next;
96974 int err;
96975
96976- pud = pud_alloc(mm, pgd, addr);
96977+ pud = (mm == &init_mm) ?
96978+ pud_alloc_kernel(mm, pgd, addr) :
96979+ pud_alloc(mm, pgd, addr);
96980 if (!pud)
96981 return -ENOMEM;
96982 do {
96983@@ -2018,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96984 return ret;
96985 }
96986
96987+#ifdef CONFIG_PAX_SEGMEXEC
96988+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96989+{
96990+ struct mm_struct *mm = vma->vm_mm;
96991+ spinlock_t *ptl;
96992+ pte_t *pte, entry;
96993+
96994+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96995+ entry = *pte;
96996+ if (!pte_present(entry)) {
96997+ if (!pte_none(entry)) {
96998+ BUG_ON(pte_file(entry));
96999+ free_swap_and_cache(pte_to_swp_entry(entry));
97000+ pte_clear_not_present_full(mm, address, pte, 0);
97001+ }
97002+ } else {
97003+ struct page *page;
97004+
97005+ flush_cache_page(vma, address, pte_pfn(entry));
97006+ entry = ptep_clear_flush(vma, address, pte);
97007+ BUG_ON(pte_dirty(entry));
97008+ page = vm_normal_page(vma, address, entry);
97009+ if (page) {
97010+ update_hiwater_rss(mm);
97011+ if (PageAnon(page))
97012+ dec_mm_counter_fast(mm, MM_ANONPAGES);
97013+ else
97014+ dec_mm_counter_fast(mm, MM_FILEPAGES);
97015+ page_remove_rmap(page);
97016+ page_cache_release(page);
97017+ }
97018+ }
97019+ pte_unmap_unlock(pte, ptl);
97020+}
97021+
97022+/* PaX: if vma is mirrored, synchronize the mirror's PTE
97023+ *
97024+ * the ptl of the lower mapped page is held on entry and is not released on exit
97025+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
97026+ */
97027+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97028+{
97029+ struct mm_struct *mm = vma->vm_mm;
97030+ unsigned long address_m;
97031+ spinlock_t *ptl_m;
97032+ struct vm_area_struct *vma_m;
97033+ pmd_t *pmd_m;
97034+ pte_t *pte_m, entry_m;
97035+
97036+ BUG_ON(!page_m || !PageAnon(page_m));
97037+
97038+ vma_m = pax_find_mirror_vma(vma);
97039+ if (!vma_m)
97040+ return;
97041+
97042+ BUG_ON(!PageLocked(page_m));
97043+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97044+ address_m = address + SEGMEXEC_TASK_SIZE;
97045+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97046+ pte_m = pte_offset_map(pmd_m, address_m);
97047+ ptl_m = pte_lockptr(mm, pmd_m);
97048+ if (ptl != ptl_m) {
97049+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97050+ if (!pte_none(*pte_m))
97051+ goto out;
97052+ }
97053+
97054+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
97055+ page_cache_get(page_m);
97056+ page_add_anon_rmap(page_m, vma_m, address_m);
97057+ inc_mm_counter_fast(mm, MM_ANONPAGES);
97058+ set_pte_at(mm, address_m, pte_m, entry_m);
97059+ update_mmu_cache(vma_m, address_m, pte_m);
97060+out:
97061+ if (ptl != ptl_m)
97062+ spin_unlock(ptl_m);
97063+ pte_unmap(pte_m);
97064+ unlock_page(page_m);
97065+}
97066+
97067+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97068+{
97069+ struct mm_struct *mm = vma->vm_mm;
97070+ unsigned long address_m;
97071+ spinlock_t *ptl_m;
97072+ struct vm_area_struct *vma_m;
97073+ pmd_t *pmd_m;
97074+ pte_t *pte_m, entry_m;
97075+
97076+ BUG_ON(!page_m || PageAnon(page_m));
97077+
97078+ vma_m = pax_find_mirror_vma(vma);
97079+ if (!vma_m)
97080+ return;
97081+
97082+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97083+ address_m = address + SEGMEXEC_TASK_SIZE;
97084+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97085+ pte_m = pte_offset_map(pmd_m, address_m);
97086+ ptl_m = pte_lockptr(mm, pmd_m);
97087+ if (ptl != ptl_m) {
97088+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97089+ if (!pte_none(*pte_m))
97090+ goto out;
97091+ }
97092+
97093+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
97094+ page_cache_get(page_m);
97095+ page_add_file_rmap(page_m);
97096+ inc_mm_counter_fast(mm, MM_FILEPAGES);
97097+ set_pte_at(mm, address_m, pte_m, entry_m);
97098+ update_mmu_cache(vma_m, address_m, pte_m);
97099+out:
97100+ if (ptl != ptl_m)
97101+ spin_unlock(ptl_m);
97102+ pte_unmap(pte_m);
97103+}
97104+
97105+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
97106+{
97107+ struct mm_struct *mm = vma->vm_mm;
97108+ unsigned long address_m;
97109+ spinlock_t *ptl_m;
97110+ struct vm_area_struct *vma_m;
97111+ pmd_t *pmd_m;
97112+ pte_t *pte_m, entry_m;
97113+
97114+ vma_m = pax_find_mirror_vma(vma);
97115+ if (!vma_m)
97116+ return;
97117+
97118+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97119+ address_m = address + SEGMEXEC_TASK_SIZE;
97120+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97121+ pte_m = pte_offset_map(pmd_m, address_m);
97122+ ptl_m = pte_lockptr(mm, pmd_m);
97123+ if (ptl != ptl_m) {
97124+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97125+ if (!pte_none(*pte_m))
97126+ goto out;
97127+ }
97128+
97129+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
97130+ set_pte_at(mm, address_m, pte_m, entry_m);
97131+out:
97132+ if (ptl != ptl_m)
97133+ spin_unlock(ptl_m);
97134+ pte_unmap(pte_m);
97135+}
97136+
97137+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
97138+{
97139+ struct page *page_m;
97140+ pte_t entry;
97141+
97142+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
97143+ goto out;
97144+
97145+ entry = *pte;
97146+ page_m = vm_normal_page(vma, address, entry);
97147+ if (!page_m)
97148+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
97149+ else if (PageAnon(page_m)) {
97150+ if (pax_find_mirror_vma(vma)) {
97151+ pte_unmap_unlock(pte, ptl);
97152+ lock_page(page_m);
97153+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
97154+ if (pte_same(entry, *pte))
97155+ pax_mirror_anon_pte(vma, address, page_m, ptl);
97156+ else
97157+ unlock_page(page_m);
97158+ }
97159+ } else
97160+ pax_mirror_file_pte(vma, address, page_m, ptl);
97161+
97162+out:
97163+ pte_unmap_unlock(pte, ptl);
97164+}
97165+#endif
97166+
97167 /*
97168 * This routine handles present pages, when users try to write
97169 * to a shared page. It is done by copying the page to a new address
97170@@ -2216,6 +2424,12 @@ gotten:
97171 */
97172 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97173 if (likely(pte_same(*page_table, orig_pte))) {
97174+
97175+#ifdef CONFIG_PAX_SEGMEXEC
97176+ if (pax_find_mirror_vma(vma))
97177+ BUG_ON(!trylock_page(new_page));
97178+#endif
97179+
97180 if (old_page) {
97181 if (!PageAnon(old_page)) {
97182 dec_mm_counter_fast(mm, MM_FILEPAGES);
97183@@ -2269,6 +2483,10 @@ gotten:
97184 page_remove_rmap(old_page);
97185 }
97186
97187+#ifdef CONFIG_PAX_SEGMEXEC
97188+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97189+#endif
97190+
97191 /* Free the old page.. */
97192 new_page = old_page;
97193 ret |= VM_FAULT_WRITE;
97194@@ -2543,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97195 swap_free(entry);
97196 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
97197 try_to_free_swap(page);
97198+
97199+#ifdef CONFIG_PAX_SEGMEXEC
97200+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
97201+#endif
97202+
97203 unlock_page(page);
97204 if (page != swapcache) {
97205 /*
97206@@ -2566,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97207
97208 /* No need to invalidate - it was non-present before */
97209 update_mmu_cache(vma, address, page_table);
97210+
97211+#ifdef CONFIG_PAX_SEGMEXEC
97212+ pax_mirror_anon_pte(vma, address, page, ptl);
97213+#endif
97214+
97215 unlock:
97216 pte_unmap_unlock(page_table, ptl);
97217 out:
97218@@ -2585,40 +2813,6 @@ out_release:
97219 }
97220
97221 /*
97222- * This is like a special single-page "expand_{down|up}wards()",
97223- * except we must first make sure that 'address{-|+}PAGE_SIZE'
97224- * doesn't hit another vma.
97225- */
97226-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
97227-{
97228- address &= PAGE_MASK;
97229- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
97230- struct vm_area_struct *prev = vma->vm_prev;
97231-
97232- /*
97233- * Is there a mapping abutting this one below?
97234- *
97235- * That's only ok if it's the same stack mapping
97236- * that has gotten split..
97237- */
97238- if (prev && prev->vm_end == address)
97239- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
97240-
97241- expand_downwards(vma, address - PAGE_SIZE);
97242- }
97243- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
97244- struct vm_area_struct *next = vma->vm_next;
97245-
97246- /* As VM_GROWSDOWN but s/below/above/ */
97247- if (next && next->vm_start == address + PAGE_SIZE)
97248- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
97249-
97250- expand_upwards(vma, address + PAGE_SIZE);
97251- }
97252- return 0;
97253-}
97254-
97255-/*
97256 * We enter with non-exclusive mmap_sem (to exclude vma changes,
97257 * but allow concurrent faults), and pte mapped but not yet locked.
97258 * We return with mmap_sem still held, but pte unmapped and unlocked.
97259@@ -2628,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97260 unsigned int flags)
97261 {
97262 struct mem_cgroup *memcg;
97263- struct page *page;
97264+ struct page *page = NULL;
97265 spinlock_t *ptl;
97266 pte_t entry;
97267
97268- pte_unmap(page_table);
97269-
97270- /* Check if we need to add a guard page to the stack */
97271- if (check_stack_guard_page(vma, address) < 0)
97272- return VM_FAULT_SIGBUS;
97273-
97274- /* Use the zero-page for reads */
97275 if (!(flags & FAULT_FLAG_WRITE)) {
97276 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
97277 vma->vm_page_prot));
97278- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97279+ ptl = pte_lockptr(mm, pmd);
97280+ spin_lock(ptl);
97281 if (!pte_none(*page_table))
97282 goto unlock;
97283 goto setpte;
97284 }
97285
97286 /* Allocate our own private page. */
97287+ pte_unmap(page_table);
97288+
97289 if (unlikely(anon_vma_prepare(vma)))
97290 goto oom;
97291 page = alloc_zeroed_user_highpage_movable(vma, address);
97292@@ -2672,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97293 if (!pte_none(*page_table))
97294 goto release;
97295
97296+#ifdef CONFIG_PAX_SEGMEXEC
97297+ if (pax_find_mirror_vma(vma))
97298+ BUG_ON(!trylock_page(page));
97299+#endif
97300+
97301 inc_mm_counter_fast(mm, MM_ANONPAGES);
97302 page_add_new_anon_rmap(page, vma, address);
97303 mem_cgroup_commit_charge(page, memcg, false);
97304@@ -2681,6 +2876,12 @@ setpte:
97305
97306 /* No need to invalidate - it was non-present before */
97307 update_mmu_cache(vma, address, page_table);
97308+
97309+#ifdef CONFIG_PAX_SEGMEXEC
97310+ if (page)
97311+ pax_mirror_anon_pte(vma, address, page, ptl);
97312+#endif
97313+
97314 unlock:
97315 pte_unmap_unlock(page_table, ptl);
97316 return 0;
97317@@ -2911,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97318 return ret;
97319 }
97320 do_set_pte(vma, address, fault_page, pte, false, false);
97321+
97322+#ifdef CONFIG_PAX_SEGMEXEC
97323+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97324+#endif
97325+
97326 unlock_page(fault_page);
97327 unlock_out:
97328 pte_unmap_unlock(pte, ptl);
97329@@ -2953,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97330 page_cache_release(fault_page);
97331 goto uncharge_out;
97332 }
97333+
97334+#ifdef CONFIG_PAX_SEGMEXEC
97335+ if (pax_find_mirror_vma(vma))
97336+ BUG_ON(!trylock_page(new_page));
97337+#endif
97338+
97339 do_set_pte(vma, address, new_page, pte, true, true);
97340+
97341+#ifdef CONFIG_PAX_SEGMEXEC
97342+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97343+#endif
97344+
97345 mem_cgroup_commit_charge(new_page, memcg, false);
97346 lru_cache_add_active_or_unevictable(new_page, vma);
97347 pte_unmap_unlock(pte, ptl);
97348@@ -3003,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97349 return ret;
97350 }
97351 do_set_pte(vma, address, fault_page, pte, true, false);
97352+
97353+#ifdef CONFIG_PAX_SEGMEXEC
97354+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97355+#endif
97356+
97357 pte_unmap_unlock(pte, ptl);
97358
97359 if (set_page_dirty(fault_page))
97360@@ -3244,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
97361 if (flags & FAULT_FLAG_WRITE)
97362 flush_tlb_fix_spurious_fault(vma, address);
97363 }
97364+
97365+#ifdef CONFIG_PAX_SEGMEXEC
97366+ pax_mirror_pte(vma, address, pte, pmd, ptl);
97367+ return 0;
97368+#endif
97369+
97370 unlock:
97371 pte_unmap_unlock(pte, ptl);
97372 return 0;
97373@@ -3263,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97374 pmd_t *pmd;
97375 pte_t *pte;
97376
97377+#ifdef CONFIG_PAX_SEGMEXEC
97378+ struct vm_area_struct *vma_m;
97379+#endif
97380+
97381 if (unlikely(is_vm_hugetlb_page(vma)))
97382 return hugetlb_fault(mm, vma, address, flags);
97383
97384+#ifdef CONFIG_PAX_SEGMEXEC
97385+ vma_m = pax_find_mirror_vma(vma);
97386+ if (vma_m) {
97387+ unsigned long address_m;
97388+ pgd_t *pgd_m;
97389+ pud_t *pud_m;
97390+ pmd_t *pmd_m;
97391+
97392+ if (vma->vm_start > vma_m->vm_start) {
97393+ address_m = address;
97394+ address -= SEGMEXEC_TASK_SIZE;
97395+ vma = vma_m;
97396+ } else
97397+ address_m = address + SEGMEXEC_TASK_SIZE;
97398+
97399+ pgd_m = pgd_offset(mm, address_m);
97400+ pud_m = pud_alloc(mm, pgd_m, address_m);
97401+ if (!pud_m)
97402+ return VM_FAULT_OOM;
97403+ pmd_m = pmd_alloc(mm, pud_m, address_m);
97404+ if (!pmd_m)
97405+ return VM_FAULT_OOM;
97406+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
97407+ return VM_FAULT_OOM;
97408+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
97409+ }
97410+#endif
97411+
97412 pgd = pgd_offset(mm, address);
97413 pud = pud_alloc(mm, pgd, address);
97414 if (!pud)
97415@@ -3399,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97416 spin_unlock(&mm->page_table_lock);
97417 return 0;
97418 }
97419+
97420+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97421+{
97422+ pud_t *new = pud_alloc_one(mm, address);
97423+ if (!new)
97424+ return -ENOMEM;
97425+
97426+ smp_wmb(); /* See comment in __pte_alloc */
97427+
97428+ spin_lock(&mm->page_table_lock);
97429+ if (pgd_present(*pgd)) /* Another has populated it */
97430+ pud_free(mm, new);
97431+ else
97432+ pgd_populate_kernel(mm, pgd, new);
97433+ spin_unlock(&mm->page_table_lock);
97434+ return 0;
97435+}
97436 #endif /* __PAGETABLE_PUD_FOLDED */
97437
97438 #ifndef __PAGETABLE_PMD_FOLDED
97439@@ -3429,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
97440 spin_unlock(&mm->page_table_lock);
97441 return 0;
97442 }
97443+
97444+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
97445+{
97446+ pmd_t *new = pmd_alloc_one(mm, address);
97447+ if (!new)
97448+ return -ENOMEM;
97449+
97450+ smp_wmb(); /* See comment in __pte_alloc */
97451+
97452+ spin_lock(&mm->page_table_lock);
97453+#ifndef __ARCH_HAS_4LEVEL_HACK
97454+ if (pud_present(*pud)) /* Another has populated it */
97455+ pmd_free(mm, new);
97456+ else
97457+ pud_populate_kernel(mm, pud, new);
97458+#else
97459+ if (pgd_present(*pud)) /* Another has populated it */
97460+ pmd_free(mm, new);
97461+ else
97462+ pgd_populate_kernel(mm, pud, new);
97463+#endif /* __ARCH_HAS_4LEVEL_HACK */
97464+ spin_unlock(&mm->page_table_lock);
97465+ return 0;
97466+}
97467 #endif /* __PAGETABLE_PMD_FOLDED */
97468
97469 static int __follow_pte(struct mm_struct *mm, unsigned long address,
97470@@ -3538,8 +3839,8 @@ out:
97471 return ret;
97472 }
97473
97474-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97475- void *buf, int len, int write)
97476+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97477+ void *buf, size_t len, int write)
97478 {
97479 resource_size_t phys_addr;
97480 unsigned long prot = 0;
97481@@ -3565,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
97482 * Access another process' address space as given in mm. If non-NULL, use the
97483 * given task for page fault accounting.
97484 */
97485-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97486- unsigned long addr, void *buf, int len, int write)
97487+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97488+ unsigned long addr, void *buf, size_t len, int write)
97489 {
97490 struct vm_area_struct *vma;
97491 void *old_buf = buf;
97492@@ -3574,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97493 down_read(&mm->mmap_sem);
97494 /* ignore errors, just check how much was successfully transferred */
97495 while (len) {
97496- int bytes, ret, offset;
97497+ ssize_t bytes, ret, offset;
97498 void *maddr;
97499 struct page *page = NULL;
97500
97501@@ -3635,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97502 *
97503 * The caller must hold a reference on @mm.
97504 */
97505-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97506- void *buf, int len, int write)
97507+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
97508+ void *buf, size_t len, int write)
97509 {
97510 return __access_remote_vm(NULL, mm, addr, buf, len, write);
97511 }
97512@@ -3646,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97513 * Source/target buffer must be kernel space,
97514 * Do not walk the page table directly, use get_user_pages
97515 */
97516-int access_process_vm(struct task_struct *tsk, unsigned long addr,
97517- void *buf, int len, int write)
97518+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
97519+ void *buf, size_t len, int write)
97520 {
97521 struct mm_struct *mm;
97522- int ret;
97523+ ssize_t ret;
97524
97525 mm = get_task_mm(tsk);
97526 if (!mm)
97527diff --git a/mm/mempolicy.c b/mm/mempolicy.c
97528index 8f5330d..b41914b 100644
97529--- a/mm/mempolicy.c
97530+++ b/mm/mempolicy.c
97531@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97532 unsigned long vmstart;
97533 unsigned long vmend;
97534
97535+#ifdef CONFIG_PAX_SEGMEXEC
97536+ struct vm_area_struct *vma_m;
97537+#endif
97538+
97539 vma = find_vma(mm, start);
97540 if (!vma || vma->vm_start > start)
97541 return -EFAULT;
97542@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97543 err = vma_replace_policy(vma, new_pol);
97544 if (err)
97545 goto out;
97546+
97547+#ifdef CONFIG_PAX_SEGMEXEC
97548+ vma_m = pax_find_mirror_vma(vma);
97549+ if (vma_m) {
97550+ err = vma_replace_policy(vma_m, new_pol);
97551+ if (err)
97552+ goto out;
97553+ }
97554+#endif
97555+
97556 }
97557
97558 out:
97559@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
97560
97561 if (end < start)
97562 return -EINVAL;
97563+
97564+#ifdef CONFIG_PAX_SEGMEXEC
97565+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97566+ if (end > SEGMEXEC_TASK_SIZE)
97567+ return -EINVAL;
97568+ } else
97569+#endif
97570+
97571+ if (end > TASK_SIZE)
97572+ return -EINVAL;
97573+
97574 if (end == start)
97575 return 0;
97576
97577@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97578 */
97579 tcred = __task_cred(task);
97580 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97581- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97582- !capable(CAP_SYS_NICE)) {
97583+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97584 rcu_read_unlock();
97585 err = -EPERM;
97586 goto out_put;
97587@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97588 goto out;
97589 }
97590
97591+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97592+ if (mm != current->mm &&
97593+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
97594+ mmput(mm);
97595+ err = -EPERM;
97596+ goto out;
97597+ }
97598+#endif
97599+
97600 err = do_migrate_pages(mm, old, new,
97601 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
97602
97603diff --git a/mm/migrate.c b/mm/migrate.c
97604index 0143995..b294728 100644
97605--- a/mm/migrate.c
97606+++ b/mm/migrate.c
97607@@ -1495,8 +1495,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
97608 */
97609 tcred = __task_cred(task);
97610 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97611- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97612- !capable(CAP_SYS_NICE)) {
97613+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97614 rcu_read_unlock();
97615 err = -EPERM;
97616 goto out;
97617diff --git a/mm/mlock.c b/mm/mlock.c
97618index ce84cb0..6d5a9aa 100644
97619--- a/mm/mlock.c
97620+++ b/mm/mlock.c
97621@@ -14,6 +14,7 @@
97622 #include <linux/pagevec.h>
97623 #include <linux/mempolicy.h>
97624 #include <linux/syscalls.h>
97625+#include <linux/security.h>
97626 #include <linux/sched.h>
97627 #include <linux/export.h>
97628 #include <linux/rmap.h>
97629@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
97630 {
97631 unsigned long nstart, end, tmp;
97632 struct vm_area_struct * vma, * prev;
97633- int error;
97634+ int error = 0;
97635
97636 VM_BUG_ON(start & ~PAGE_MASK);
97637 VM_BUG_ON(len != PAGE_ALIGN(len));
97638@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
97639 return -EINVAL;
97640 if (end == start)
97641 return 0;
97642+ if (end > TASK_SIZE)
97643+ return -EINVAL;
97644+
97645 vma = find_vma(current->mm, start);
97646 if (!vma || vma->vm_start > start)
97647 return -ENOMEM;
97648@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
97649 for (nstart = start ; ; ) {
97650 vm_flags_t newflags;
97651
97652+#ifdef CONFIG_PAX_SEGMEXEC
97653+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97654+ break;
97655+#endif
97656+
97657 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
97658
97659 newflags = vma->vm_flags & ~VM_LOCKED;
97660@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
97661 locked += current->mm->locked_vm;
97662
97663 /* check against resource limits */
97664+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
97665 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
97666 error = do_mlock(start, len, 1);
97667
97668@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
97669 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
97670 vm_flags_t newflags;
97671
97672+#ifdef CONFIG_PAX_SEGMEXEC
97673+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97674+ break;
97675+#endif
97676+
97677 newflags = vma->vm_flags & ~VM_LOCKED;
97678 if (flags & MCL_CURRENT)
97679 newflags |= VM_LOCKED;
97680@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
97681 lock_limit >>= PAGE_SHIFT;
97682
97683 ret = -ENOMEM;
97684+
97685+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
97686+
97687 down_write(&current->mm->mmap_sem);
97688-
97689 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
97690 capable(CAP_IPC_LOCK))
97691 ret = do_mlockall(flags);
97692diff --git a/mm/mmap.c b/mm/mmap.c
97693index c0a3637..c760814 100644
97694--- a/mm/mmap.c
97695+++ b/mm/mmap.c
97696@@ -41,6 +41,7 @@
97697 #include <linux/notifier.h>
97698 #include <linux/memory.h>
97699 #include <linux/printk.h>
97700+#include <linux/random.h>
97701
97702 #include <asm/uaccess.h>
97703 #include <asm/cacheflush.h>
97704@@ -57,6 +58,16 @@
97705 #define arch_rebalance_pgtables(addr, len) (addr)
97706 #endif
97707
97708+static inline void verify_mm_writelocked(struct mm_struct *mm)
97709+{
97710+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
97711+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97712+ up_read(&mm->mmap_sem);
97713+ BUG();
97714+ }
97715+#endif
97716+}
97717+
97718 static void unmap_region(struct mm_struct *mm,
97719 struct vm_area_struct *vma, struct vm_area_struct *prev,
97720 unsigned long start, unsigned long end);
97721@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
97722 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
97723 *
97724 */
97725-pgprot_t protection_map[16] = {
97726+pgprot_t protection_map[16] __read_only = {
97727 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
97728 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
97729 };
97730
97731-pgprot_t vm_get_page_prot(unsigned long vm_flags)
97732+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
97733 {
97734- return __pgprot(pgprot_val(protection_map[vm_flags &
97735+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
97736 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
97737 pgprot_val(arch_vm_get_page_prot(vm_flags)));
97738+
97739+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97740+ if (!(__supported_pte_mask & _PAGE_NX) &&
97741+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97742+ (vm_flags & (VM_READ | VM_WRITE)))
97743+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97744+#endif
97745+
97746+ return prot;
97747 }
97748 EXPORT_SYMBOL(vm_get_page_prot);
97749
97750@@ -95,6 +115,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97751 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97752 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97753 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97754+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97755 /*
97756 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97757 * other variables. It can be updated by several CPUs frequently.
97758@@ -255,6 +276,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97759 struct vm_area_struct *next = vma->vm_next;
97760
97761 might_sleep();
97762+ BUG_ON(vma->vm_mirror);
97763 if (vma->vm_ops && vma->vm_ops->close)
97764 vma->vm_ops->close(vma);
97765 if (vma->vm_file)
97766@@ -299,6 +321,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97767 * not page aligned -Ram Gupta
97768 */
97769 rlim = rlimit(RLIMIT_DATA);
97770+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97771+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97772+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97773+ rlim = 4096 * PAGE_SIZE;
97774+#endif
97775+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
97776 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
97777 (mm->end_data - mm->start_data) > rlim)
97778 goto out;
97779@@ -949,6 +977,12 @@ static int
97780 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97781 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97782 {
97783+
97784+#ifdef CONFIG_PAX_SEGMEXEC
97785+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97786+ return 0;
97787+#endif
97788+
97789 if (is_mergeable_vma(vma, file, vm_flags) &&
97790 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97791 if (vma->vm_pgoff == vm_pgoff)
97792@@ -968,6 +1002,12 @@ static int
97793 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97794 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97795 {
97796+
97797+#ifdef CONFIG_PAX_SEGMEXEC
97798+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97799+ return 0;
97800+#endif
97801+
97802 if (is_mergeable_vma(vma, file, vm_flags) &&
97803 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97804 pgoff_t vm_pglen;
97805@@ -1010,13 +1050,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97806 struct vm_area_struct *vma_merge(struct mm_struct *mm,
97807 struct vm_area_struct *prev, unsigned long addr,
97808 unsigned long end, unsigned long vm_flags,
97809- struct anon_vma *anon_vma, struct file *file,
97810+ struct anon_vma *anon_vma, struct file *file,
97811 pgoff_t pgoff, struct mempolicy *policy)
97812 {
97813 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
97814 struct vm_area_struct *area, *next;
97815 int err;
97816
97817+#ifdef CONFIG_PAX_SEGMEXEC
97818+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97819+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97820+
97821+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97822+#endif
97823+
97824 /*
97825 * We later require that vma->vm_flags == vm_flags,
97826 * so this tests vma->vm_flags & VM_SPECIAL, too.
97827@@ -1032,6 +1079,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97828 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97829 next = next->vm_next;
97830
97831+#ifdef CONFIG_PAX_SEGMEXEC
97832+ if (prev)
97833+ prev_m = pax_find_mirror_vma(prev);
97834+ if (area)
97835+ area_m = pax_find_mirror_vma(area);
97836+ if (next)
97837+ next_m = pax_find_mirror_vma(next);
97838+#endif
97839+
97840 /*
97841 * Can it merge with the predecessor?
97842 */
97843@@ -1051,9 +1107,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97844 /* cases 1, 6 */
97845 err = vma_adjust(prev, prev->vm_start,
97846 next->vm_end, prev->vm_pgoff, NULL);
97847- } else /* cases 2, 5, 7 */
97848+
97849+#ifdef CONFIG_PAX_SEGMEXEC
97850+ if (!err && prev_m)
97851+ err = vma_adjust(prev_m, prev_m->vm_start,
97852+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97853+#endif
97854+
97855+ } else { /* cases 2, 5, 7 */
97856 err = vma_adjust(prev, prev->vm_start,
97857 end, prev->vm_pgoff, NULL);
97858+
97859+#ifdef CONFIG_PAX_SEGMEXEC
97860+ if (!err && prev_m)
97861+ err = vma_adjust(prev_m, prev_m->vm_start,
97862+ end_m, prev_m->vm_pgoff, NULL);
97863+#endif
97864+
97865+ }
97866 if (err)
97867 return NULL;
97868 khugepaged_enter_vma_merge(prev);
97869@@ -1067,12 +1138,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97870 mpol_equal(policy, vma_policy(next)) &&
97871 can_vma_merge_before(next, vm_flags,
97872 anon_vma, file, pgoff+pglen)) {
97873- if (prev && addr < prev->vm_end) /* case 4 */
97874+ if (prev && addr < prev->vm_end) { /* case 4 */
97875 err = vma_adjust(prev, prev->vm_start,
97876 addr, prev->vm_pgoff, NULL);
97877- else /* cases 3, 8 */
97878+
97879+#ifdef CONFIG_PAX_SEGMEXEC
97880+ if (!err && prev_m)
97881+ err = vma_adjust(prev_m, prev_m->vm_start,
97882+ addr_m, prev_m->vm_pgoff, NULL);
97883+#endif
97884+
97885+ } else { /* cases 3, 8 */
97886 err = vma_adjust(area, addr, next->vm_end,
97887 next->vm_pgoff - pglen, NULL);
97888+
97889+#ifdef CONFIG_PAX_SEGMEXEC
97890+ if (!err && area_m)
97891+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97892+ next_m->vm_pgoff - pglen, NULL);
97893+#endif
97894+
97895+ }
97896 if (err)
97897 return NULL;
97898 khugepaged_enter_vma_merge(area);
97899@@ -1181,8 +1267,10 @@ none:
97900 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97901 struct file *file, long pages)
97902 {
97903- const unsigned long stack_flags
97904- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97905+
97906+#ifdef CONFIG_PAX_RANDMMAP
97907+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97908+#endif
97909
97910 mm->total_vm += pages;
97911
97912@@ -1190,7 +1278,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97913 mm->shared_vm += pages;
97914 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97915 mm->exec_vm += pages;
97916- } else if (flags & stack_flags)
97917+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97918 mm->stack_vm += pages;
97919 }
97920 #endif /* CONFIG_PROC_FS */
97921@@ -1220,6 +1308,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97922 locked += mm->locked_vm;
97923 lock_limit = rlimit(RLIMIT_MEMLOCK);
97924 lock_limit >>= PAGE_SHIFT;
97925+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97926 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97927 return -EAGAIN;
97928 }
97929@@ -1246,7 +1335,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97930 * (the exception is when the underlying filesystem is noexec
97931 * mounted, in which case we dont add PROT_EXEC.)
97932 */
97933- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97934+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97935 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97936 prot |= PROT_EXEC;
97937
97938@@ -1272,7 +1361,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97939 /* Obtain the address to map to. we verify (or select) it and ensure
97940 * that it represents a valid section of the address space.
97941 */
97942- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97943+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97944 if (addr & ~PAGE_MASK)
97945 return addr;
97946
97947@@ -1283,6 +1372,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97948 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97949 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97950
97951+#ifdef CONFIG_PAX_MPROTECT
97952+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97953+
97954+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97955+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97956+ mm->binfmt->handle_mmap)
97957+ mm->binfmt->handle_mmap(file);
97958+#endif
97959+
97960+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97961+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97962+ gr_log_rwxmmap(file);
97963+
97964+#ifdef CONFIG_PAX_EMUPLT
97965+ vm_flags &= ~VM_EXEC;
97966+#else
97967+ return -EPERM;
97968+#endif
97969+
97970+ }
97971+
97972+ if (!(vm_flags & VM_EXEC))
97973+ vm_flags &= ~VM_MAYEXEC;
97974+#else
97975+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97976+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97977+#endif
97978+ else
97979+ vm_flags &= ~VM_MAYWRITE;
97980+ }
97981+#endif
97982+
97983+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97984+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97985+ vm_flags &= ~VM_PAGEEXEC;
97986+#endif
97987+
97988 if (flags & MAP_LOCKED)
97989 if (!can_do_mlock())
97990 return -EPERM;
97991@@ -1370,6 +1496,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97992 vm_flags |= VM_NORESERVE;
97993 }
97994
97995+ if (!gr_acl_handle_mmap(file, prot))
97996+ return -EACCES;
97997+
97998 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97999 if (!IS_ERR_VALUE(addr) &&
98000 ((vm_flags & VM_LOCKED) ||
98001@@ -1463,7 +1592,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
98002 vm_flags_t vm_flags = vma->vm_flags;
98003
98004 /* If it was private or non-writable, the write bit is already clear */
98005- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
98006+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
98007 return 0;
98008
98009 /* The backer wishes to know when pages are first written to? */
98010@@ -1509,7 +1638,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98011 struct rb_node **rb_link, *rb_parent;
98012 unsigned long charged = 0;
98013
98014+#ifdef CONFIG_PAX_SEGMEXEC
98015+ struct vm_area_struct *vma_m = NULL;
98016+#endif
98017+
98018+ /*
98019+ * mm->mmap_sem is required to protect against another thread
98020+ * changing the mappings in case we sleep.
98021+ */
98022+ verify_mm_writelocked(mm);
98023+
98024 /* Check against address space limit. */
98025+
98026+#ifdef CONFIG_PAX_RANDMMAP
98027+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
98028+#endif
98029+
98030 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
98031 unsigned long nr_pages;
98032
98033@@ -1528,11 +1672,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98034
98035 /* Clear old maps */
98036 error = -ENOMEM;
98037-munmap_back:
98038 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98039 if (do_munmap(mm, addr, len))
98040 return -ENOMEM;
98041- goto munmap_back;
98042+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98043 }
98044
98045 /*
98046@@ -1563,6 +1706,16 @@ munmap_back:
98047 goto unacct_error;
98048 }
98049
98050+#ifdef CONFIG_PAX_SEGMEXEC
98051+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
98052+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98053+ if (!vma_m) {
98054+ error = -ENOMEM;
98055+ goto free_vma;
98056+ }
98057+ }
98058+#endif
98059+
98060 vma->vm_mm = mm;
98061 vma->vm_start = addr;
98062 vma->vm_end = addr + len;
98063@@ -1593,6 +1746,13 @@ munmap_back:
98064 if (error)
98065 goto unmap_and_free_vma;
98066
98067+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98068+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
98069+ vma->vm_flags |= VM_PAGEEXEC;
98070+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98071+ }
98072+#endif
98073+
98074 /* Can addr have changed??
98075 *
98076 * Answer: Yes, several device drivers can do it in their
98077@@ -1626,6 +1786,12 @@ munmap_back:
98078 }
98079
98080 vma_link(mm, vma, prev, rb_link, rb_parent);
98081+
98082+#ifdef CONFIG_PAX_SEGMEXEC
98083+ if (vma_m)
98084+ BUG_ON(pax_mirror_vma(vma_m, vma));
98085+#endif
98086+
98087 /* Once vma denies write, undo our temporary denial count */
98088 if (file) {
98089 if (vm_flags & VM_SHARED)
98090@@ -1638,6 +1804,7 @@ out:
98091 perf_event_mmap(vma);
98092
98093 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
98094+ track_exec_limit(mm, addr, addr + len, vm_flags);
98095 if (vm_flags & VM_LOCKED) {
98096 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
98097 vma == get_gate_vma(current->mm)))
98098@@ -1673,6 +1840,12 @@ allow_write_and_free_vma:
98099 if (vm_flags & VM_DENYWRITE)
98100 allow_write_access(file);
98101 free_vma:
98102+
98103+#ifdef CONFIG_PAX_SEGMEXEC
98104+ if (vma_m)
98105+ kmem_cache_free(vm_area_cachep, vma_m);
98106+#endif
98107+
98108 kmem_cache_free(vm_area_cachep, vma);
98109 unacct_error:
98110 if (charged)
98111@@ -1680,7 +1853,63 @@ unacct_error:
98112 return error;
98113 }
98114
98115-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98116+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
98117+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
98118+{
98119+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
98120+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
98121+
98122+ return 0;
98123+}
98124+#endif
98125+
98126+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
98127+{
98128+ if (!vma) {
98129+#ifdef CONFIG_STACK_GROWSUP
98130+ if (addr > sysctl_heap_stack_gap)
98131+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
98132+ else
98133+ vma = find_vma(current->mm, 0);
98134+ if (vma && (vma->vm_flags & VM_GROWSUP))
98135+ return false;
98136+#endif
98137+ return true;
98138+ }
98139+
98140+ if (addr + len > vma->vm_start)
98141+ return false;
98142+
98143+ if (vma->vm_flags & VM_GROWSDOWN)
98144+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
98145+#ifdef CONFIG_STACK_GROWSUP
98146+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
98147+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
98148+#endif
98149+ else if (offset)
98150+ return offset <= vma->vm_start - addr - len;
98151+
98152+ return true;
98153+}
98154+
98155+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
98156+{
98157+ if (vma->vm_start < len)
98158+ return -ENOMEM;
98159+
98160+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
98161+ if (offset <= vma->vm_start - len)
98162+ return vma->vm_start - len - offset;
98163+ else
98164+ return -ENOMEM;
98165+ }
98166+
98167+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
98168+ return vma->vm_start - len - sysctl_heap_stack_gap;
98169+ return -ENOMEM;
98170+}
98171+
98172+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
98173 {
98174 /*
98175 * We implement the search by looking for an rbtree node that
98176@@ -1728,11 +1957,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98177 }
98178 }
98179
98180- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
98181+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
98182 check_current:
98183 /* Check if current node has a suitable gap */
98184 if (gap_start > high_limit)
98185 return -ENOMEM;
98186+
98187+ if (gap_end - gap_start > info->threadstack_offset)
98188+ gap_start += info->threadstack_offset;
98189+ else
98190+ gap_start = gap_end;
98191+
98192+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98193+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98194+ gap_start += sysctl_heap_stack_gap;
98195+ else
98196+ gap_start = gap_end;
98197+ }
98198+ if (vma->vm_flags & VM_GROWSDOWN) {
98199+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98200+ gap_end -= sysctl_heap_stack_gap;
98201+ else
98202+ gap_end = gap_start;
98203+ }
98204 if (gap_end >= low_limit && gap_end - gap_start >= length)
98205 goto found;
98206
98207@@ -1782,7 +2029,7 @@ found:
98208 return gap_start;
98209 }
98210
98211-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
98212+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
98213 {
98214 struct mm_struct *mm = current->mm;
98215 struct vm_area_struct *vma;
98216@@ -1836,6 +2083,24 @@ check_current:
98217 gap_end = vma->vm_start;
98218 if (gap_end < low_limit)
98219 return -ENOMEM;
98220+
98221+ if (gap_end - gap_start > info->threadstack_offset)
98222+ gap_end -= info->threadstack_offset;
98223+ else
98224+ gap_end = gap_start;
98225+
98226+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98227+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98228+ gap_start += sysctl_heap_stack_gap;
98229+ else
98230+ gap_start = gap_end;
98231+ }
98232+ if (vma->vm_flags & VM_GROWSDOWN) {
98233+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98234+ gap_end -= sysctl_heap_stack_gap;
98235+ else
98236+ gap_end = gap_start;
98237+ }
98238 if (gap_start <= high_limit && gap_end - gap_start >= length)
98239 goto found;
98240
98241@@ -1899,6 +2164,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98242 struct mm_struct *mm = current->mm;
98243 struct vm_area_struct *vma;
98244 struct vm_unmapped_area_info info;
98245+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98246
98247 if (len > TASK_SIZE - mmap_min_addr)
98248 return -ENOMEM;
98249@@ -1906,11 +2172,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98250 if (flags & MAP_FIXED)
98251 return addr;
98252
98253+#ifdef CONFIG_PAX_RANDMMAP
98254+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98255+#endif
98256+
98257 if (addr) {
98258 addr = PAGE_ALIGN(addr);
98259 vma = find_vma(mm, addr);
98260 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98261- (!vma || addr + len <= vma->vm_start))
98262+ check_heap_stack_gap(vma, addr, len, offset))
98263 return addr;
98264 }
98265
98266@@ -1919,6 +2189,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98267 info.low_limit = mm->mmap_base;
98268 info.high_limit = TASK_SIZE;
98269 info.align_mask = 0;
98270+ info.threadstack_offset = offset;
98271 return vm_unmapped_area(&info);
98272 }
98273 #endif
98274@@ -1937,6 +2208,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98275 struct mm_struct *mm = current->mm;
98276 unsigned long addr = addr0;
98277 struct vm_unmapped_area_info info;
98278+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98279
98280 /* requested length too big for entire address space */
98281 if (len > TASK_SIZE - mmap_min_addr)
98282@@ -1945,12 +2217,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98283 if (flags & MAP_FIXED)
98284 return addr;
98285
98286+#ifdef CONFIG_PAX_RANDMMAP
98287+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98288+#endif
98289+
98290 /* requesting a specific address */
98291 if (addr) {
98292 addr = PAGE_ALIGN(addr);
98293 vma = find_vma(mm, addr);
98294 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98295- (!vma || addr + len <= vma->vm_start))
98296+ check_heap_stack_gap(vma, addr, len, offset))
98297 return addr;
98298 }
98299
98300@@ -1959,6 +2235,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98301 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
98302 info.high_limit = mm->mmap_base;
98303 info.align_mask = 0;
98304+ info.threadstack_offset = offset;
98305 addr = vm_unmapped_area(&info);
98306
98307 /*
98308@@ -1971,6 +2248,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98309 VM_BUG_ON(addr != -ENOMEM);
98310 info.flags = 0;
98311 info.low_limit = TASK_UNMAPPED_BASE;
98312+
98313+#ifdef CONFIG_PAX_RANDMMAP
98314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
98315+ info.low_limit += mm->delta_mmap;
98316+#endif
98317+
98318 info.high_limit = TASK_SIZE;
98319 addr = vm_unmapped_area(&info);
98320 }
98321@@ -2071,6 +2354,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
98322 return vma;
98323 }
98324
98325+#ifdef CONFIG_PAX_SEGMEXEC
98326+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
98327+{
98328+ struct vm_area_struct *vma_m;
98329+
98330+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
98331+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
98332+ BUG_ON(vma->vm_mirror);
98333+ return NULL;
98334+ }
98335+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
98336+ vma_m = vma->vm_mirror;
98337+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
98338+ BUG_ON(vma->vm_file != vma_m->vm_file);
98339+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
98340+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
98341+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
98342+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
98343+ return vma_m;
98344+}
98345+#endif
98346+
98347 /*
98348 * Verify that the stack growth is acceptable and
98349 * update accounting. This is shared with both the
98350@@ -2087,6 +2392,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98351 return -ENOMEM;
98352
98353 /* Stack limit test */
98354+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
98355 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
98356 return -ENOMEM;
98357
98358@@ -2097,6 +2403,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98359 locked = mm->locked_vm + grow;
98360 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
98361 limit >>= PAGE_SHIFT;
98362+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98363 if (locked > limit && !capable(CAP_IPC_LOCK))
98364 return -ENOMEM;
98365 }
98366@@ -2126,37 +2433,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98367 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
98368 * vma is the last one with address > vma->vm_end. Have to extend vma.
98369 */
98370+#ifndef CONFIG_IA64
98371+static
98372+#endif
98373 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98374 {
98375 int error;
98376+ bool locknext;
98377
98378 if (!(vma->vm_flags & VM_GROWSUP))
98379 return -EFAULT;
98380
98381+ /* Also guard against wrapping around to address 0. */
98382+ if (address < PAGE_ALIGN(address+1))
98383+ address = PAGE_ALIGN(address+1);
98384+ else
98385+ return -ENOMEM;
98386+
98387 /*
98388 * We must make sure the anon_vma is allocated
98389 * so that the anon_vma locking is not a noop.
98390 */
98391 if (unlikely(anon_vma_prepare(vma)))
98392 return -ENOMEM;
98393+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
98394+ if (locknext && anon_vma_prepare(vma->vm_next))
98395+ return -ENOMEM;
98396 vma_lock_anon_vma(vma);
98397+ if (locknext)
98398+ vma_lock_anon_vma(vma->vm_next);
98399
98400 /*
98401 * vma->vm_start/vm_end cannot change under us because the caller
98402 * is required to hold the mmap_sem in read mode. We need the
98403- * anon_vma lock to serialize against concurrent expand_stacks.
98404- * Also guard against wrapping around to address 0.
98405+ * anon_vma locks to serialize against concurrent expand_stacks
98406+ * and expand_upwards.
98407 */
98408- if (address < PAGE_ALIGN(address+4))
98409- address = PAGE_ALIGN(address+4);
98410- else {
98411- vma_unlock_anon_vma(vma);
98412- return -ENOMEM;
98413- }
98414 error = 0;
98415
98416 /* Somebody else might have raced and expanded it already */
98417- if (address > vma->vm_end) {
98418+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
98419+ error = -ENOMEM;
98420+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
98421 unsigned long size, grow;
98422
98423 size = address - vma->vm_start;
98424@@ -2191,6 +2509,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98425 }
98426 }
98427 }
98428+ if (locknext)
98429+ vma_unlock_anon_vma(vma->vm_next);
98430 vma_unlock_anon_vma(vma);
98431 khugepaged_enter_vma_merge(vma);
98432 validate_mm(vma->vm_mm);
98433@@ -2205,6 +2525,8 @@ int expand_downwards(struct vm_area_struct *vma,
98434 unsigned long address)
98435 {
98436 int error;
98437+ bool lockprev = false;
98438+ struct vm_area_struct *prev;
98439
98440 /*
98441 * We must make sure the anon_vma is allocated
98442@@ -2218,6 +2540,15 @@ int expand_downwards(struct vm_area_struct *vma,
98443 if (error)
98444 return error;
98445
98446+ prev = vma->vm_prev;
98447+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
98448+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
98449+#endif
98450+ if (lockprev && anon_vma_prepare(prev))
98451+ return -ENOMEM;
98452+ if (lockprev)
98453+ vma_lock_anon_vma(prev);
98454+
98455 vma_lock_anon_vma(vma);
98456
98457 /*
98458@@ -2227,9 +2558,17 @@ int expand_downwards(struct vm_area_struct *vma,
98459 */
98460
98461 /* Somebody else might have raced and expanded it already */
98462- if (address < vma->vm_start) {
98463+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
98464+ error = -ENOMEM;
98465+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
98466 unsigned long size, grow;
98467
98468+#ifdef CONFIG_PAX_SEGMEXEC
98469+ struct vm_area_struct *vma_m;
98470+
98471+ vma_m = pax_find_mirror_vma(vma);
98472+#endif
98473+
98474 size = vma->vm_end - address;
98475 grow = (vma->vm_start - address) >> PAGE_SHIFT;
98476
98477@@ -2254,13 +2593,27 @@ int expand_downwards(struct vm_area_struct *vma,
98478 vma->vm_pgoff -= grow;
98479 anon_vma_interval_tree_post_update_vma(vma);
98480 vma_gap_update(vma);
98481+
98482+#ifdef CONFIG_PAX_SEGMEXEC
98483+ if (vma_m) {
98484+ anon_vma_interval_tree_pre_update_vma(vma_m);
98485+ vma_m->vm_start -= grow << PAGE_SHIFT;
98486+ vma_m->vm_pgoff -= grow;
98487+ anon_vma_interval_tree_post_update_vma(vma_m);
98488+ vma_gap_update(vma_m);
98489+ }
98490+#endif
98491+
98492 spin_unlock(&vma->vm_mm->page_table_lock);
98493
98494+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
98495 perf_event_mmap(vma);
98496 }
98497 }
98498 }
98499 vma_unlock_anon_vma(vma);
98500+ if (lockprev)
98501+ vma_unlock_anon_vma(prev);
98502 khugepaged_enter_vma_merge(vma);
98503 validate_mm(vma->vm_mm);
98504 return error;
98505@@ -2358,6 +2711,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
98506 do {
98507 long nrpages = vma_pages(vma);
98508
98509+#ifdef CONFIG_PAX_SEGMEXEC
98510+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
98511+ vma = remove_vma(vma);
98512+ continue;
98513+ }
98514+#endif
98515+
98516 if (vma->vm_flags & VM_ACCOUNT)
98517 nr_accounted += nrpages;
98518 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
98519@@ -2402,6 +2762,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
98520 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
98521 vma->vm_prev = NULL;
98522 do {
98523+
98524+#ifdef CONFIG_PAX_SEGMEXEC
98525+ if (vma->vm_mirror) {
98526+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
98527+ vma->vm_mirror->vm_mirror = NULL;
98528+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
98529+ vma->vm_mirror = NULL;
98530+ }
98531+#endif
98532+
98533 vma_rb_erase(vma, &mm->mm_rb);
98534 mm->map_count--;
98535 tail_vma = vma;
98536@@ -2429,14 +2799,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98537 struct vm_area_struct *new;
98538 int err = -ENOMEM;
98539
98540+#ifdef CONFIG_PAX_SEGMEXEC
98541+ struct vm_area_struct *vma_m, *new_m = NULL;
98542+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
98543+#endif
98544+
98545 if (is_vm_hugetlb_page(vma) && (addr &
98546 ~(huge_page_mask(hstate_vma(vma)))))
98547 return -EINVAL;
98548
98549+#ifdef CONFIG_PAX_SEGMEXEC
98550+ vma_m = pax_find_mirror_vma(vma);
98551+#endif
98552+
98553 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98554 if (!new)
98555 goto out_err;
98556
98557+#ifdef CONFIG_PAX_SEGMEXEC
98558+ if (vma_m) {
98559+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98560+ if (!new_m) {
98561+ kmem_cache_free(vm_area_cachep, new);
98562+ goto out_err;
98563+ }
98564+ }
98565+#endif
98566+
98567 /* most fields are the same, copy all, and then fixup */
98568 *new = *vma;
98569
98570@@ -2449,6 +2838,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98571 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
98572 }
98573
98574+#ifdef CONFIG_PAX_SEGMEXEC
98575+ if (vma_m) {
98576+ *new_m = *vma_m;
98577+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
98578+ new_m->vm_mirror = new;
98579+ new->vm_mirror = new_m;
98580+
98581+ if (new_below)
98582+ new_m->vm_end = addr_m;
98583+ else {
98584+ new_m->vm_start = addr_m;
98585+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
98586+ }
98587+ }
98588+#endif
98589+
98590 err = vma_dup_policy(vma, new);
98591 if (err)
98592 goto out_free_vma;
98593@@ -2468,6 +2873,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98594 else
98595 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
98596
98597+#ifdef CONFIG_PAX_SEGMEXEC
98598+ if (!err && vma_m) {
98599+ struct mempolicy *pol = vma_policy(new);
98600+
98601+ if (anon_vma_clone(new_m, vma_m))
98602+ goto out_free_mpol;
98603+
98604+ mpol_get(pol);
98605+ set_vma_policy(new_m, pol);
98606+
98607+ if (new_m->vm_file)
98608+ get_file(new_m->vm_file);
98609+
98610+ if (new_m->vm_ops && new_m->vm_ops->open)
98611+ new_m->vm_ops->open(new_m);
98612+
98613+ if (new_below)
98614+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
98615+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
98616+ else
98617+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
98618+
98619+ if (err) {
98620+ if (new_m->vm_ops && new_m->vm_ops->close)
98621+ new_m->vm_ops->close(new_m);
98622+ if (new_m->vm_file)
98623+ fput(new_m->vm_file);
98624+ mpol_put(pol);
98625+ }
98626+ }
98627+#endif
98628+
98629 /* Success. */
98630 if (!err)
98631 return 0;
98632@@ -2477,10 +2914,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98633 new->vm_ops->close(new);
98634 if (new->vm_file)
98635 fput(new->vm_file);
98636- unlink_anon_vmas(new);
98637 out_free_mpol:
98638 mpol_put(vma_policy(new));
98639 out_free_vma:
98640+
98641+#ifdef CONFIG_PAX_SEGMEXEC
98642+ if (new_m) {
98643+ unlink_anon_vmas(new_m);
98644+ kmem_cache_free(vm_area_cachep, new_m);
98645+ }
98646+#endif
98647+
98648+ unlink_anon_vmas(new);
98649 kmem_cache_free(vm_area_cachep, new);
98650 out_err:
98651 return err;
98652@@ -2493,6 +2938,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98653 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98654 unsigned long addr, int new_below)
98655 {
98656+
98657+#ifdef CONFIG_PAX_SEGMEXEC
98658+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98659+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
98660+ if (mm->map_count >= sysctl_max_map_count-1)
98661+ return -ENOMEM;
98662+ } else
98663+#endif
98664+
98665 if (mm->map_count >= sysctl_max_map_count)
98666 return -ENOMEM;
98667
98668@@ -2504,11 +2958,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98669 * work. This now handles partial unmappings.
98670 * Jeremy Fitzhardinge <jeremy@goop.org>
98671 */
98672+#ifdef CONFIG_PAX_SEGMEXEC
98673 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98674 {
98675+ int ret = __do_munmap(mm, start, len);
98676+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
98677+ return ret;
98678+
98679+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
98680+}
98681+
98682+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98683+#else
98684+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98685+#endif
98686+{
98687 unsigned long end;
98688 struct vm_area_struct *vma, *prev, *last;
98689
98690+ /*
98691+ * mm->mmap_sem is required to protect against another thread
98692+ * changing the mappings in case we sleep.
98693+ */
98694+ verify_mm_writelocked(mm);
98695+
98696 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
98697 return -EINVAL;
98698
98699@@ -2583,6 +3056,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98700 /* Fix up all other VM information */
98701 remove_vma_list(mm, vma);
98702
98703+ track_exec_limit(mm, start, end, 0UL);
98704+
98705 return 0;
98706 }
98707
98708@@ -2591,6 +3066,13 @@ int vm_munmap(unsigned long start, size_t len)
98709 int ret;
98710 struct mm_struct *mm = current->mm;
98711
98712+
98713+#ifdef CONFIG_PAX_SEGMEXEC
98714+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98715+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
98716+ return -EINVAL;
98717+#endif
98718+
98719 down_write(&mm->mmap_sem);
98720 ret = do_munmap(mm, start, len);
98721 up_write(&mm->mmap_sem);
98722@@ -2604,16 +3086,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
98723 return vm_munmap(addr, len);
98724 }
98725
98726-static inline void verify_mm_writelocked(struct mm_struct *mm)
98727-{
98728-#ifdef CONFIG_DEBUG_VM
98729- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98730- WARN_ON(1);
98731- up_read(&mm->mmap_sem);
98732- }
98733-#endif
98734-}
98735-
98736 /*
98737 * this is really a simplified "do_mmap". it only handles
98738 * anonymous maps. eventually we may be able to do some
98739@@ -2627,6 +3099,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98740 struct rb_node ** rb_link, * rb_parent;
98741 pgoff_t pgoff = addr >> PAGE_SHIFT;
98742 int error;
98743+ unsigned long charged;
98744
98745 len = PAGE_ALIGN(len);
98746 if (!len)
98747@@ -2634,10 +3107,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98748
98749 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98750
98751+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98752+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98753+ flags &= ~VM_EXEC;
98754+
98755+#ifdef CONFIG_PAX_MPROTECT
98756+ if (mm->pax_flags & MF_PAX_MPROTECT)
98757+ flags &= ~VM_MAYEXEC;
98758+#endif
98759+
98760+ }
98761+#endif
98762+
98763 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98764 if (error & ~PAGE_MASK)
98765 return error;
98766
98767+ charged = len >> PAGE_SHIFT;
98768+
98769 error = mlock_future_check(mm, mm->def_flags, len);
98770 if (error)
98771 return error;
98772@@ -2651,21 +3138,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98773 /*
98774 * Clear old maps. this also does some error checking for us
98775 */
98776- munmap_back:
98777 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98778 if (do_munmap(mm, addr, len))
98779 return -ENOMEM;
98780- goto munmap_back;
98781+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98782 }
98783
98784 /* Check against address space limits *after* clearing old maps... */
98785- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98786+ if (!may_expand_vm(mm, charged))
98787 return -ENOMEM;
98788
98789 if (mm->map_count > sysctl_max_map_count)
98790 return -ENOMEM;
98791
98792- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98793+ if (security_vm_enough_memory_mm(mm, charged))
98794 return -ENOMEM;
98795
98796 /* Can we just expand an old private anonymous mapping? */
98797@@ -2679,7 +3165,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98798 */
98799 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98800 if (!vma) {
98801- vm_unacct_memory(len >> PAGE_SHIFT);
98802+ vm_unacct_memory(charged);
98803 return -ENOMEM;
98804 }
98805
98806@@ -2693,10 +3179,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98807 vma_link(mm, vma, prev, rb_link, rb_parent);
98808 out:
98809 perf_event_mmap(vma);
98810- mm->total_vm += len >> PAGE_SHIFT;
98811+ mm->total_vm += charged;
98812 if (flags & VM_LOCKED)
98813- mm->locked_vm += (len >> PAGE_SHIFT);
98814+ mm->locked_vm += charged;
98815 vma->vm_flags |= VM_SOFTDIRTY;
98816+ track_exec_limit(mm, addr, addr + len, flags);
98817 return addr;
98818 }
98819
98820@@ -2758,6 +3245,7 @@ void exit_mmap(struct mm_struct *mm)
98821 while (vma) {
98822 if (vma->vm_flags & VM_ACCOUNT)
98823 nr_accounted += vma_pages(vma);
98824+ vma->vm_mirror = NULL;
98825 vma = remove_vma(vma);
98826 }
98827 vm_unacct_memory(nr_accounted);
98828@@ -2775,6 +3263,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98829 struct vm_area_struct *prev;
98830 struct rb_node **rb_link, *rb_parent;
98831
98832+#ifdef CONFIG_PAX_SEGMEXEC
98833+ struct vm_area_struct *vma_m = NULL;
98834+#endif
98835+
98836+ if (security_mmap_addr(vma->vm_start))
98837+ return -EPERM;
98838+
98839 /*
98840 * The vm_pgoff of a purely anonymous vma should be irrelevant
98841 * until its first write fault, when page's anon_vma and index
98842@@ -2798,7 +3293,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98843 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98844 return -ENOMEM;
98845
98846+#ifdef CONFIG_PAX_SEGMEXEC
98847+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98848+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98849+ if (!vma_m)
98850+ return -ENOMEM;
98851+ }
98852+#endif
98853+
98854 vma_link(mm, vma, prev, rb_link, rb_parent);
98855+
98856+#ifdef CONFIG_PAX_SEGMEXEC
98857+ if (vma_m)
98858+ BUG_ON(pax_mirror_vma(vma_m, vma));
98859+#endif
98860+
98861 return 0;
98862 }
98863
98864@@ -2817,6 +3326,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98865 struct rb_node **rb_link, *rb_parent;
98866 bool faulted_in_anon_vma = true;
98867
98868+ BUG_ON(vma->vm_mirror);
98869+
98870 /*
98871 * If anonymous vma has not yet been faulted, update new pgoff
98872 * to match new location, to increase its chance of merging.
98873@@ -2881,6 +3392,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98874 return NULL;
98875 }
98876
98877+#ifdef CONFIG_PAX_SEGMEXEC
98878+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98879+{
98880+ struct vm_area_struct *prev_m;
98881+ struct rb_node **rb_link_m, *rb_parent_m;
98882+ struct mempolicy *pol_m;
98883+
98884+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98885+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98886+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98887+ *vma_m = *vma;
98888+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98889+ if (anon_vma_clone(vma_m, vma))
98890+ return -ENOMEM;
98891+ pol_m = vma_policy(vma_m);
98892+ mpol_get(pol_m);
98893+ set_vma_policy(vma_m, pol_m);
98894+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98895+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98896+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98897+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98898+ if (vma_m->vm_file)
98899+ get_file(vma_m->vm_file);
98900+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98901+ vma_m->vm_ops->open(vma_m);
98902+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98903+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98904+ vma_m->vm_mirror = vma;
98905+ vma->vm_mirror = vma_m;
98906+ return 0;
98907+}
98908+#endif
98909+
98910 /*
98911 * Return true if the calling process may expand its vm space by the passed
98912 * number of pages
98913@@ -2892,6 +3436,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98914
98915 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98916
98917+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98918 if (cur + npages > lim)
98919 return 0;
98920 return 1;
98921@@ -2974,6 +3519,22 @@ static struct vm_area_struct *__install_special_mapping(
98922 vma->vm_start = addr;
98923 vma->vm_end = addr + len;
98924
98925+#ifdef CONFIG_PAX_MPROTECT
98926+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98927+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98928+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98929+ return ERR_PTR(-EPERM);
98930+ if (!(vm_flags & VM_EXEC))
98931+ vm_flags &= ~VM_MAYEXEC;
98932+#else
98933+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98934+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98935+#endif
98936+ else
98937+ vm_flags &= ~VM_MAYWRITE;
98938+ }
98939+#endif
98940+
98941 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98942 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98943
98944diff --git a/mm/mprotect.c b/mm/mprotect.c
98945index c43d557..0b7ccd2 100644
98946--- a/mm/mprotect.c
98947+++ b/mm/mprotect.c
98948@@ -24,10 +24,18 @@
98949 #include <linux/migrate.h>
98950 #include <linux/perf_event.h>
98951 #include <linux/ksm.h>
98952+#include <linux/sched/sysctl.h>
98953+
98954+#ifdef CONFIG_PAX_MPROTECT
98955+#include <linux/elf.h>
98956+#include <linux/binfmts.h>
98957+#endif
98958+
98959 #include <asm/uaccess.h>
98960 #include <asm/pgtable.h>
98961 #include <asm/cacheflush.h>
98962 #include <asm/tlbflush.h>
98963+#include <asm/mmu_context.h>
98964
98965 #ifndef pgprot_modify
98966 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98967@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98968 return pages;
98969 }
98970
98971+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98972+/* called while holding the mmap semaphor for writing except stack expansion */
98973+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98974+{
98975+ unsigned long oldlimit, newlimit = 0UL;
98976+
98977+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98978+ return;
98979+
98980+ spin_lock(&mm->page_table_lock);
98981+ oldlimit = mm->context.user_cs_limit;
98982+ if ((prot & VM_EXEC) && oldlimit < end)
98983+ /* USER_CS limit moved up */
98984+ newlimit = end;
98985+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98986+ /* USER_CS limit moved down */
98987+ newlimit = start;
98988+
98989+ if (newlimit) {
98990+ mm->context.user_cs_limit = newlimit;
98991+
98992+#ifdef CONFIG_SMP
98993+ wmb();
98994+ cpus_clear(mm->context.cpu_user_cs_mask);
98995+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98996+#endif
98997+
98998+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98999+ }
99000+ spin_unlock(&mm->page_table_lock);
99001+ if (newlimit == end) {
99002+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
99003+
99004+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
99005+ if (is_vm_hugetlb_page(vma))
99006+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
99007+ else
99008+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
99009+ }
99010+}
99011+#endif
99012+
99013 int
99014 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99015 unsigned long start, unsigned long end, unsigned long newflags)
99016@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99017 int error;
99018 int dirty_accountable = 0;
99019
99020+#ifdef CONFIG_PAX_SEGMEXEC
99021+ struct vm_area_struct *vma_m = NULL;
99022+ unsigned long start_m, end_m;
99023+
99024+ start_m = start + SEGMEXEC_TASK_SIZE;
99025+ end_m = end + SEGMEXEC_TASK_SIZE;
99026+#endif
99027+
99028 if (newflags == oldflags) {
99029 *pprev = vma;
99030 return 0;
99031 }
99032
99033+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
99034+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
99035+
99036+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
99037+ return -ENOMEM;
99038+
99039+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
99040+ return -ENOMEM;
99041+ }
99042+
99043 /*
99044 * If we make a private mapping writable we increase our commit;
99045 * but (without finer accounting) cannot reduce our commit if we
99046@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99047 }
99048 }
99049
99050+#ifdef CONFIG_PAX_SEGMEXEC
99051+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
99052+ if (start != vma->vm_start) {
99053+ error = split_vma(mm, vma, start, 1);
99054+ if (error)
99055+ goto fail;
99056+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
99057+ *pprev = (*pprev)->vm_next;
99058+ }
99059+
99060+ if (end != vma->vm_end) {
99061+ error = split_vma(mm, vma, end, 0);
99062+ if (error)
99063+ goto fail;
99064+ }
99065+
99066+ if (pax_find_mirror_vma(vma)) {
99067+ error = __do_munmap(mm, start_m, end_m - start_m);
99068+ if (error)
99069+ goto fail;
99070+ } else {
99071+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99072+ if (!vma_m) {
99073+ error = -ENOMEM;
99074+ goto fail;
99075+ }
99076+ vma->vm_flags = newflags;
99077+ error = pax_mirror_vma(vma_m, vma);
99078+ if (error) {
99079+ vma->vm_flags = oldflags;
99080+ goto fail;
99081+ }
99082+ }
99083+ }
99084+#endif
99085+
99086 /*
99087 * First try to merge with previous and/or next vma.
99088 */
99089@@ -319,9 +423,21 @@ success:
99090 * vm_flags and vm_page_prot are protected by the mmap_sem
99091 * held in write mode.
99092 */
99093+
99094+#ifdef CONFIG_PAX_SEGMEXEC
99095+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
99096+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
99097+#endif
99098+
99099 vma->vm_flags = newflags;
99100+
99101+#ifdef CONFIG_PAX_MPROTECT
99102+ if (mm->binfmt && mm->binfmt->handle_mprotect)
99103+ mm->binfmt->handle_mprotect(vma, newflags);
99104+#endif
99105+
99106 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
99107- vm_get_page_prot(newflags));
99108+ vm_get_page_prot(vma->vm_flags));
99109
99110 if (vma_wants_writenotify(vma)) {
99111 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
99112@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99113 end = start + len;
99114 if (end <= start)
99115 return -ENOMEM;
99116+
99117+#ifdef CONFIG_PAX_SEGMEXEC
99118+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
99119+ if (end > SEGMEXEC_TASK_SIZE)
99120+ return -EINVAL;
99121+ } else
99122+#endif
99123+
99124+ if (end > TASK_SIZE)
99125+ return -EINVAL;
99126+
99127 if (!arch_validate_prot(prot))
99128 return -EINVAL;
99129
99130@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99131 /*
99132 * Does the application expect PROT_READ to imply PROT_EXEC:
99133 */
99134- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
99135+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
99136 prot |= PROT_EXEC;
99137
99138 vm_flags = calc_vm_prot_bits(prot);
99139@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99140 if (start > vma->vm_start)
99141 prev = vma;
99142
99143+#ifdef CONFIG_PAX_MPROTECT
99144+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
99145+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
99146+#endif
99147+
99148 for (nstart = start ; ; ) {
99149 unsigned long newflags;
99150
99151@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99152
99153 /* newflags >> 4 shift VM_MAY% in place of VM_% */
99154 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
99155+ if (prot & (PROT_WRITE | PROT_EXEC))
99156+ gr_log_rwxmprotect(vma);
99157+
99158+ error = -EACCES;
99159+ goto out;
99160+ }
99161+
99162+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
99163 error = -EACCES;
99164 goto out;
99165 }
99166@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99167 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
99168 if (error)
99169 goto out;
99170+
99171+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
99172+
99173 nstart = tmp;
99174
99175 if (nstart < prev->vm_end)
99176diff --git a/mm/mremap.c b/mm/mremap.c
99177index 05f1180..c3cde48 100644
99178--- a/mm/mremap.c
99179+++ b/mm/mremap.c
99180@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
99181 continue;
99182 pte = ptep_get_and_clear(mm, old_addr, old_pte);
99183 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
99184+
99185+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99186+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
99187+ pte = pte_exprotect(pte);
99188+#endif
99189+
99190 pte = move_soft_dirty_pte(pte);
99191 set_pte_at(mm, new_addr, new_pte, pte);
99192 }
99193@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
99194 if (is_vm_hugetlb_page(vma))
99195 goto Einval;
99196
99197+#ifdef CONFIG_PAX_SEGMEXEC
99198+ if (pax_find_mirror_vma(vma))
99199+ goto Einval;
99200+#endif
99201+
99202 /* We can't remap across vm area boundaries */
99203 if (old_len > vma->vm_end - addr)
99204 goto Efault;
99205@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
99206 unsigned long ret = -EINVAL;
99207 unsigned long charged = 0;
99208 unsigned long map_flags;
99209+ unsigned long pax_task_size = TASK_SIZE;
99210
99211 if (new_addr & ~PAGE_MASK)
99212 goto out;
99213
99214- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
99215+#ifdef CONFIG_PAX_SEGMEXEC
99216+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99217+ pax_task_size = SEGMEXEC_TASK_SIZE;
99218+#endif
99219+
99220+ pax_task_size -= PAGE_SIZE;
99221+
99222+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
99223 goto out;
99224
99225 /* Check if the location we're moving into overlaps the
99226 * old location at all, and fail if it does.
99227 */
99228- if ((new_addr <= addr) && (new_addr+new_len) > addr)
99229- goto out;
99230-
99231- if ((addr <= new_addr) && (addr+old_len) > new_addr)
99232+ if (addr + old_len > new_addr && new_addr + new_len > addr)
99233 goto out;
99234
99235 ret = do_munmap(mm, new_addr, new_len);
99236@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99237 unsigned long ret = -EINVAL;
99238 unsigned long charged = 0;
99239 bool locked = false;
99240+ unsigned long pax_task_size = TASK_SIZE;
99241
99242 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
99243 return ret;
99244@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99245 if (!new_len)
99246 return ret;
99247
99248+#ifdef CONFIG_PAX_SEGMEXEC
99249+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99250+ pax_task_size = SEGMEXEC_TASK_SIZE;
99251+#endif
99252+
99253+ pax_task_size -= PAGE_SIZE;
99254+
99255+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
99256+ old_len > pax_task_size || addr > pax_task_size-old_len)
99257+ return ret;
99258+
99259 down_write(&current->mm->mmap_sem);
99260
99261 if (flags & MREMAP_FIXED) {
99262@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99263 new_addr = addr;
99264 }
99265 ret = addr;
99266+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
99267 goto out;
99268 }
99269 }
99270@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99271 goto out;
99272 }
99273
99274+ map_flags = vma->vm_flags;
99275 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
99276+ if (!(ret & ~PAGE_MASK)) {
99277+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
99278+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
99279+ }
99280 }
99281 out:
99282 if (ret & ~PAGE_MASK)
99283diff --git a/mm/nommu.c b/mm/nommu.c
99284index a881d96..e5932cd 100644
99285--- a/mm/nommu.c
99286+++ b/mm/nommu.c
99287@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
99288 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
99289 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
99290 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
99291-int heap_stack_gap = 0;
99292
99293 atomic_long_t mmap_pages_allocated;
99294
99295@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
99296 EXPORT_SYMBOL(find_vma);
99297
99298 /*
99299- * find a VMA
99300- * - we don't extend stack VMAs under NOMMU conditions
99301- */
99302-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
99303-{
99304- return find_vma(mm, addr);
99305-}
99306-
99307-/*
99308 * expand a stack to a given address
99309 * - not supported under NOMMU conditions
99310 */
99311@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99312
99313 /* most fields are the same, copy all, and then fixup */
99314 *new = *vma;
99315+ INIT_LIST_HEAD(&new->anon_vma_chain);
99316 *region = *vma->vm_region;
99317 new->vm_region = region;
99318
99319@@ -2002,8 +1993,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
99320 }
99321 EXPORT_SYMBOL(generic_file_remap_pages);
99322
99323-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99324- unsigned long addr, void *buf, int len, int write)
99325+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99326+ unsigned long addr, void *buf, size_t len, int write)
99327 {
99328 struct vm_area_struct *vma;
99329
99330@@ -2044,8 +2035,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99331 *
99332 * The caller must hold a reference on @mm.
99333 */
99334-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99335- void *buf, int len, int write)
99336+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
99337+ void *buf, size_t len, int write)
99338 {
99339 return __access_remote_vm(NULL, mm, addr, buf, len, write);
99340 }
99341@@ -2054,7 +2045,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99342 * Access another process' address space.
99343 * - source/target buffer must be kernel space
99344 */
99345-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
99346+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
99347 {
99348 struct mm_struct *mm;
99349
99350diff --git a/mm/page-writeback.c b/mm/page-writeback.c
99351index 91d73ef..0e564d2 100644
99352--- a/mm/page-writeback.c
99353+++ b/mm/page-writeback.c
99354@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
99355 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
99356 * - the bdi dirty thresh drops quickly due to change of JBOD workload
99357 */
99358-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
99359+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
99360 unsigned long thresh,
99361 unsigned long bg_thresh,
99362 unsigned long dirty,
99363diff --git a/mm/page_alloc.c b/mm/page_alloc.c
99364index eee9619..155d328 100644
99365--- a/mm/page_alloc.c
99366+++ b/mm/page_alloc.c
99367@@ -61,6 +61,7 @@
99368 #include <linux/page-debug-flags.h>
99369 #include <linux/hugetlb.h>
99370 #include <linux/sched/rt.h>
99371+#include <linux/random.h>
99372
99373 #include <asm/sections.h>
99374 #include <asm/tlbflush.h>
99375@@ -357,7 +358,7 @@ out:
99376 * This usage means that zero-order pages may not be compound.
99377 */
99378
99379-static void free_compound_page(struct page *page)
99380+void free_compound_page(struct page *page)
99381 {
99382 __free_pages_ok(page, compound_order(page));
99383 }
99384@@ -751,6 +752,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99385 int i;
99386 int bad = 0;
99387
99388+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99389+ unsigned long index = 1UL << order;
99390+#endif
99391+
99392 trace_mm_page_free(page, order);
99393 kmemcheck_free_shadow(page, order);
99394
99395@@ -767,6 +772,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99396 debug_check_no_obj_freed(page_address(page),
99397 PAGE_SIZE << order);
99398 }
99399+
99400+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99401+ for (; index; --index)
99402+ sanitize_highpage(page + index - 1);
99403+#endif
99404+
99405 arch_free_page(page, order);
99406 kernel_map_pages(page, 1 << order, 0);
99407
99408@@ -790,6 +801,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
99409 local_irq_restore(flags);
99410 }
99411
99412+#ifdef CONFIG_PAX_LATENT_ENTROPY
99413+bool __meminitdata extra_latent_entropy;
99414+
99415+static int __init setup_pax_extra_latent_entropy(char *str)
99416+{
99417+ extra_latent_entropy = true;
99418+ return 0;
99419+}
99420+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
99421+
99422+volatile u64 latent_entropy __latent_entropy;
99423+EXPORT_SYMBOL(latent_entropy);
99424+#endif
99425+
99426 void __init __free_pages_bootmem(struct page *page, unsigned int order)
99427 {
99428 unsigned int nr_pages = 1 << order;
99429@@ -805,6 +830,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
99430 __ClearPageReserved(p);
99431 set_page_count(p, 0);
99432
99433+#ifdef CONFIG_PAX_LATENT_ENTROPY
99434+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
99435+ u64 hash = 0;
99436+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
99437+ const u64 *data = lowmem_page_address(page);
99438+
99439+ for (index = 0; index < end; index++)
99440+ hash ^= hash + data[index];
99441+ latent_entropy ^= hash;
99442+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
99443+ }
99444+#endif
99445+
99446 page_zone(page)->managed_pages += nr_pages;
99447 set_page_refcounted(page);
99448 __free_pages(page, order);
99449@@ -933,8 +971,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
99450 arch_alloc_page(page, order);
99451 kernel_map_pages(page, 1 << order, 1);
99452
99453+#ifndef CONFIG_PAX_MEMORY_SANITIZE
99454 if (gfp_flags & __GFP_ZERO)
99455 prep_zero_page(page, order, gfp_flags);
99456+#endif
99457
99458 if (order && (gfp_flags & __GFP_COMP))
99459 prep_compound_page(page, order);
99460@@ -1612,7 +1652,7 @@ again:
99461 }
99462
99463 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
99464- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99465+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99466 !zone_is_fair_depleted(zone))
99467 zone_set_flag(zone, ZONE_FAIR_DEPLETED);
99468
99469@@ -1933,7 +1973,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
99470 do {
99471 mod_zone_page_state(zone, NR_ALLOC_BATCH,
99472 high_wmark_pages(zone) - low_wmark_pages(zone) -
99473- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99474+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99475 zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
99476 } while (zone++ != preferred_zone);
99477 }
99478@@ -5702,7 +5742,7 @@ static void __setup_per_zone_wmarks(void)
99479
99480 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
99481 high_wmark_pages(zone) - low_wmark_pages(zone) -
99482- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99483+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99484
99485 setup_zone_migrate_reserve(zone);
99486 spin_unlock_irqrestore(&zone->lock, flags);
99487diff --git a/mm/percpu.c b/mm/percpu.c
99488index da997f9..19040e9 100644
99489--- a/mm/percpu.c
99490+++ b/mm/percpu.c
99491@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
99492 static unsigned int pcpu_high_unit_cpu __read_mostly;
99493
99494 /* the address of the first chunk which starts with the kernel static area */
99495-void *pcpu_base_addr __read_mostly;
99496+void *pcpu_base_addr __read_only;
99497 EXPORT_SYMBOL_GPL(pcpu_base_addr);
99498
99499 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
99500diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
99501index 5077afc..846c9ef 100644
99502--- a/mm/process_vm_access.c
99503+++ b/mm/process_vm_access.c
99504@@ -13,6 +13,7 @@
99505 #include <linux/uio.h>
99506 #include <linux/sched.h>
99507 #include <linux/highmem.h>
99508+#include <linux/security.h>
99509 #include <linux/ptrace.h>
99510 #include <linux/slab.h>
99511 #include <linux/syscalls.h>
99512@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99513 ssize_t iov_len;
99514 size_t total_len = iov_iter_count(iter);
99515
99516+ return -ENOSYS; // PaX: until properly audited
99517+
99518 /*
99519 * Work out how many pages of struct pages we're going to need
99520 * when eventually calling get_user_pages
99521 */
99522 for (i = 0; i < riovcnt; i++) {
99523 iov_len = rvec[i].iov_len;
99524- if (iov_len > 0) {
99525- nr_pages_iov = ((unsigned long)rvec[i].iov_base
99526- + iov_len)
99527- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
99528- / PAGE_SIZE + 1;
99529- nr_pages = max(nr_pages, nr_pages_iov);
99530- }
99531+ if (iov_len <= 0)
99532+ continue;
99533+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
99534+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
99535+ nr_pages = max(nr_pages, nr_pages_iov);
99536 }
99537
99538 if (nr_pages == 0)
99539@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99540 goto free_proc_pages;
99541 }
99542
99543+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
99544+ rc = -EPERM;
99545+ goto put_task_struct;
99546+ }
99547+
99548 mm = mm_access(task, PTRACE_MODE_ATTACH);
99549 if (!mm || IS_ERR(mm)) {
99550 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
99551diff --git a/mm/rmap.c b/mm/rmap.c
99552index 3e8491c..02abccc 100644
99553--- a/mm/rmap.c
99554+++ b/mm/rmap.c
99555@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99556 struct anon_vma *anon_vma = vma->anon_vma;
99557 struct anon_vma_chain *avc;
99558
99559+#ifdef CONFIG_PAX_SEGMEXEC
99560+ struct anon_vma_chain *avc_m = NULL;
99561+#endif
99562+
99563 might_sleep();
99564 if (unlikely(!anon_vma)) {
99565 struct mm_struct *mm = vma->vm_mm;
99566@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99567 if (!avc)
99568 goto out_enomem;
99569
99570+#ifdef CONFIG_PAX_SEGMEXEC
99571+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
99572+ if (!avc_m)
99573+ goto out_enomem_free_avc;
99574+#endif
99575+
99576 anon_vma = find_mergeable_anon_vma(vma);
99577 allocated = NULL;
99578 if (!anon_vma) {
99579@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99580 /* page_table_lock to protect against threads */
99581 spin_lock(&mm->page_table_lock);
99582 if (likely(!vma->anon_vma)) {
99583+
99584+#ifdef CONFIG_PAX_SEGMEXEC
99585+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
99586+
99587+ if (vma_m) {
99588+ BUG_ON(vma_m->anon_vma);
99589+ vma_m->anon_vma = anon_vma;
99590+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
99591+ avc_m = NULL;
99592+ }
99593+#endif
99594+
99595 vma->anon_vma = anon_vma;
99596 anon_vma_chain_link(vma, avc, anon_vma);
99597 allocated = NULL;
99598@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99599
99600 if (unlikely(allocated))
99601 put_anon_vma(allocated);
99602+
99603+#ifdef CONFIG_PAX_SEGMEXEC
99604+ if (unlikely(avc_m))
99605+ anon_vma_chain_free(avc_m);
99606+#endif
99607+
99608 if (unlikely(avc))
99609 anon_vma_chain_free(avc);
99610 }
99611 return 0;
99612
99613 out_enomem_free_avc:
99614+
99615+#ifdef CONFIG_PAX_SEGMEXEC
99616+ if (avc_m)
99617+ anon_vma_chain_free(avc_m);
99618+#endif
99619+
99620 anon_vma_chain_free(avc);
99621 out_enomem:
99622 return -ENOMEM;
99623@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
99624 * Attach the anon_vmas from src to dst.
99625 * Returns 0 on success, -ENOMEM on failure.
99626 */
99627-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99628+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
99629 {
99630 struct anon_vma_chain *avc, *pavc;
99631 struct anon_vma *root = NULL;
99632@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99633 * the corresponding VMA in the parent process is attached to.
99634 * Returns 0 on success, non-zero on failure.
99635 */
99636-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
99637+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
99638 {
99639 struct anon_vma_chain *avc;
99640 struct anon_vma *anon_vma;
99641@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
99642 void __init anon_vma_init(void)
99643 {
99644 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
99645- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
99646- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
99647+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
99648+ anon_vma_ctor);
99649+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
99650+ SLAB_PANIC|SLAB_NO_SANITIZE);
99651 }
99652
99653 /*
99654diff --git a/mm/shmem.c b/mm/shmem.c
99655index 469f90d..34a09ee 100644
99656--- a/mm/shmem.c
99657+++ b/mm/shmem.c
99658@@ -33,7 +33,7 @@
99659 #include <linux/swap.h>
99660 #include <linux/aio.h>
99661
99662-static struct vfsmount *shm_mnt;
99663+struct vfsmount *shm_mnt;
99664
99665 #ifdef CONFIG_SHMEM
99666 /*
99667@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
99668 #define BOGO_DIRENT_SIZE 20
99669
99670 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99671-#define SHORT_SYMLINK_LEN 128
99672+#define SHORT_SYMLINK_LEN 64
99673
99674 /*
99675 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99676@@ -2524,6 +2524,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
99677 static int shmem_xattr_validate(const char *name)
99678 {
99679 struct { const char *prefix; size_t len; } arr[] = {
99680+
99681+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99682+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
99683+#endif
99684+
99685 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
99686 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
99687 };
99688@@ -2579,6 +2584,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
99689 if (err)
99690 return err;
99691
99692+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99693+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
99694+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
99695+ return -EOPNOTSUPP;
99696+ if (size > 8)
99697+ return -EINVAL;
99698+ }
99699+#endif
99700+
99701 return simple_xattr_set(&info->xattrs, name, value, size, flags);
99702 }
99703
99704@@ -2962,8 +2976,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
99705 int err = -ENOMEM;
99706
99707 /* Round up to L1_CACHE_BYTES to resist false sharing */
99708- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
99709- L1_CACHE_BYTES), GFP_KERNEL);
99710+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
99711 if (!sbinfo)
99712 return -ENOMEM;
99713
99714diff --git a/mm/slab.c b/mm/slab.c
99715index 7c52b38..3ccc17e 100644
99716--- a/mm/slab.c
99717+++ b/mm/slab.c
99718@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99719 if ((x)->max_freeable < i) \
99720 (x)->max_freeable = i; \
99721 } while (0)
99722-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
99723-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
99724-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
99725-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
99726+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
99727+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
99728+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99729+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99730+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99731+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99732 #else
99733 #define STATS_INC_ACTIVE(x) do { } while (0)
99734 #define STATS_DEC_ACTIVE(x) do { } while (0)
99735@@ -336,6 +338,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99736 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99737 #define STATS_INC_FREEHIT(x) do { } while (0)
99738 #define STATS_INC_FREEMISS(x) do { } while (0)
99739+#define STATS_INC_SANITIZED(x) do { } while (0)
99740+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99741 #endif
99742
99743 #if DEBUG
99744@@ -452,7 +456,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99745 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99746 */
99747 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99748- const struct page *page, void *obj)
99749+ const struct page *page, const void *obj)
99750 {
99751 u32 offset = (obj - page->s_mem);
99752 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99753@@ -1462,12 +1466,12 @@ void __init kmem_cache_init(void)
99754 */
99755
99756 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
99757- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
99758+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99759
99760 if (INDEX_AC != INDEX_NODE)
99761 kmalloc_caches[INDEX_NODE] =
99762 create_kmalloc_cache("kmalloc-node",
99763- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99764+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99765
99766 slab_early_init = 0;
99767
99768@@ -3384,6 +3388,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99769 struct array_cache *ac = cpu_cache_get(cachep);
99770
99771 check_irq_off();
99772+
99773+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99774+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99775+ STATS_INC_NOT_SANITIZED(cachep);
99776+ else {
99777+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99778+
99779+ if (cachep->ctor)
99780+ cachep->ctor(objp);
99781+
99782+ STATS_INC_SANITIZED(cachep);
99783+ }
99784+#endif
99785+
99786 kmemleak_free_recursive(objp, cachep->flags);
99787 objp = cache_free_debugcheck(cachep, objp, caller);
99788
99789@@ -3607,6 +3625,7 @@ void kfree(const void *objp)
99790
99791 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99792 return;
99793+ VM_BUG_ON(!virt_addr_valid(objp));
99794 local_irq_save(flags);
99795 kfree_debugcheck(objp);
99796 c = virt_to_cache(objp);
99797@@ -4056,14 +4075,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99798 }
99799 /* cpu stats */
99800 {
99801- unsigned long allochit = atomic_read(&cachep->allochit);
99802- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99803- unsigned long freehit = atomic_read(&cachep->freehit);
99804- unsigned long freemiss = atomic_read(&cachep->freemiss);
99805+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99806+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99807+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99808+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99809
99810 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99811 allochit, allocmiss, freehit, freemiss);
99812 }
99813+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99814+ {
99815+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99816+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99817+
99818+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99819+ }
99820+#endif
99821 #endif
99822 }
99823
99824@@ -4281,13 +4308,69 @@ static const struct file_operations proc_slabstats_operations = {
99825 static int __init slab_proc_init(void)
99826 {
99827 #ifdef CONFIG_DEBUG_SLAB_LEAK
99828- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99829+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99830 #endif
99831 return 0;
99832 }
99833 module_init(slab_proc_init);
99834 #endif
99835
99836+bool is_usercopy_object(const void *ptr)
99837+{
99838+ struct page *page;
99839+ struct kmem_cache *cachep;
99840+
99841+ if (ZERO_OR_NULL_PTR(ptr))
99842+ return false;
99843+
99844+ if (!slab_is_available())
99845+ return false;
99846+
99847+ if (!virt_addr_valid(ptr))
99848+ return false;
99849+
99850+ page = virt_to_head_page(ptr);
99851+
99852+ if (!PageSlab(page))
99853+ return false;
99854+
99855+ cachep = page->slab_cache;
99856+ return cachep->flags & SLAB_USERCOPY;
99857+}
99858+
99859+#ifdef CONFIG_PAX_USERCOPY
99860+const char *check_heap_object(const void *ptr, unsigned long n)
99861+{
99862+ struct page *page;
99863+ struct kmem_cache *cachep;
99864+ unsigned int objnr;
99865+ unsigned long offset;
99866+
99867+ if (ZERO_OR_NULL_PTR(ptr))
99868+ return "<null>";
99869+
99870+ if (!virt_addr_valid(ptr))
99871+ return NULL;
99872+
99873+ page = virt_to_head_page(ptr);
99874+
99875+ if (!PageSlab(page))
99876+ return NULL;
99877+
99878+ cachep = page->slab_cache;
99879+ if (!(cachep->flags & SLAB_USERCOPY))
99880+ return cachep->name;
99881+
99882+ objnr = obj_to_index(cachep, page, ptr);
99883+ BUG_ON(objnr >= cachep->num);
99884+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99885+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99886+ return NULL;
99887+
99888+ return cachep->name;
99889+}
99890+#endif
99891+
99892 /**
99893 * ksize - get the actual amount of memory allocated for a given object
99894 * @objp: Pointer to the object
99895diff --git a/mm/slab.h b/mm/slab.h
99896index 0e0fdd3..d0fd761 100644
99897--- a/mm/slab.h
99898+++ b/mm/slab.h
99899@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
99900 /* The slab cache that manages slab cache information */
99901 extern struct kmem_cache *kmem_cache;
99902
99903+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99904+#ifdef CONFIG_X86_64
99905+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99906+#else
99907+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99908+#endif
99909+enum pax_sanitize_mode {
99910+ PAX_SANITIZE_SLAB_OFF = 0,
99911+ PAX_SANITIZE_SLAB_FAST,
99912+ PAX_SANITIZE_SLAB_FULL,
99913+};
99914+extern enum pax_sanitize_mode pax_sanitize_slab;
99915+#endif
99916+
99917 unsigned long calculate_alignment(unsigned long flags,
99918 unsigned long align, unsigned long size);
99919
99920@@ -67,7 +81,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99921
99922 /* Legal flag mask for kmem_cache_create(), for various configurations */
99923 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99924- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99925+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99926+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99927
99928 #if defined(CONFIG_DEBUG_SLAB)
99929 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99930@@ -251,6 +266,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99931 return s;
99932
99933 page = virt_to_head_page(x);
99934+
99935+ BUG_ON(!PageSlab(page));
99936+
99937 cachep = page->slab_cache;
99938 if (slab_equal_or_root(cachep, s))
99939 return cachep;
99940diff --git a/mm/slab_common.c b/mm/slab_common.c
99941index d319502..da7714e 100644
99942--- a/mm/slab_common.c
99943+++ b/mm/slab_common.c
99944@@ -25,11 +25,35 @@
99945
99946 #include "slab.h"
99947
99948-enum slab_state slab_state;
99949+enum slab_state slab_state __read_only;
99950 LIST_HEAD(slab_caches);
99951 DEFINE_MUTEX(slab_mutex);
99952 struct kmem_cache *kmem_cache;
99953
99954+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99955+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99956+static int __init pax_sanitize_slab_setup(char *str)
99957+{
99958+ if (!str)
99959+ return 0;
99960+
99961+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99962+ pr_info("PaX slab sanitization: %s\n", "disabled");
99963+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99964+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99965+ pr_info("PaX slab sanitization: %s\n", "fast");
99966+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99967+ } else if (!strcmp(str, "full")) {
99968+ pr_info("PaX slab sanitization: %s\n", "full");
99969+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99970+ } else
99971+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99972+
99973+ return 0;
99974+}
99975+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99976+#endif
99977+
99978 #ifdef CONFIG_DEBUG_VM
99979 static int kmem_cache_sanity_check(const char *name, size_t size)
99980 {
99981@@ -160,7 +184,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99982 if (err)
99983 goto out_free_cache;
99984
99985- s->refcount = 1;
99986+ atomic_set(&s->refcount, 1);
99987 list_add(&s->list, &slab_caches);
99988 out:
99989 if (err)
99990@@ -222,6 +246,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99991 */
99992 flags &= CACHE_CREATE_MASK;
99993
99994+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99995+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99996+ flags |= SLAB_NO_SANITIZE;
99997+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99998+ flags &= ~SLAB_NO_SANITIZE;
99999+#endif
100000+
100001 s = __kmem_cache_alias(name, size, align, flags, ctor);
100002 if (s)
100003 goto out_unlock;
100004@@ -341,8 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
100005
100006 mutex_lock(&slab_mutex);
100007
100008- s->refcount--;
100009- if (s->refcount)
100010+ if (!atomic_dec_and_test(&s->refcount))
100011 goto out_unlock;
100012
100013 if (memcg_cleanup_cache_params(s) != 0)
100014@@ -362,7 +392,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
100015 rcu_barrier();
100016
100017 memcg_free_cache_params(s);
100018-#ifdef SLAB_SUPPORTS_SYSFS
100019+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100020 sysfs_slab_remove(s);
100021 #else
100022 slab_kmem_cache_release(s);
100023@@ -418,7 +448,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
100024 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
100025 name, size, err);
100026
100027- s->refcount = -1; /* Exempt from merging for now */
100028+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
100029 }
100030
100031 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
100032@@ -431,7 +461,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
100033
100034 create_boot_cache(s, name, size, flags);
100035 list_add(&s->list, &slab_caches);
100036- s->refcount = 1;
100037+ atomic_set(&s->refcount, 1);
100038 return s;
100039 }
100040
100041@@ -443,6 +473,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
100042 EXPORT_SYMBOL(kmalloc_dma_caches);
100043 #endif
100044
100045+#ifdef CONFIG_PAX_USERCOPY_SLABS
100046+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
100047+EXPORT_SYMBOL(kmalloc_usercopy_caches);
100048+#endif
100049+
100050 /*
100051 * Conversion table for small slabs sizes / 8 to the index in the
100052 * kmalloc array. This is necessary for slabs < 192 since we have non power
100053@@ -507,6 +542,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
100054 return kmalloc_dma_caches[index];
100055
100056 #endif
100057+
100058+#ifdef CONFIG_PAX_USERCOPY_SLABS
100059+ if (unlikely((flags & GFP_USERCOPY)))
100060+ return kmalloc_usercopy_caches[index];
100061+
100062+#endif
100063+
100064 return kmalloc_caches[index];
100065 }
100066
100067@@ -563,7 +605,7 @@ void __init create_kmalloc_caches(unsigned long flags)
100068 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
100069 if (!kmalloc_caches[i]) {
100070 kmalloc_caches[i] = create_kmalloc_cache(NULL,
100071- 1 << i, flags);
100072+ 1 << i, SLAB_USERCOPY | flags);
100073 }
100074
100075 /*
100076@@ -572,10 +614,10 @@ void __init create_kmalloc_caches(unsigned long flags)
100077 * earlier power of two caches
100078 */
100079 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
100080- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
100081+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
100082
100083 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
100084- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
100085+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
100086 }
100087
100088 /* Kmalloc array is now usable */
100089@@ -608,6 +650,23 @@ void __init create_kmalloc_caches(unsigned long flags)
100090 }
100091 }
100092 #endif
100093+
100094+#ifdef CONFIG_PAX_USERCOPY_SLABS
100095+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
100096+ struct kmem_cache *s = kmalloc_caches[i];
100097+
100098+ if (s) {
100099+ int size = kmalloc_size(i);
100100+ char *n = kasprintf(GFP_NOWAIT,
100101+ "usercopy-kmalloc-%d", size);
100102+
100103+ BUG_ON(!n);
100104+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
100105+ size, SLAB_USERCOPY | flags);
100106+ }
100107+ }
100108+#endif
100109+
100110 }
100111 #endif /* !CONFIG_SLOB */
100112
100113@@ -666,6 +725,9 @@ void print_slabinfo_header(struct seq_file *m)
100114 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
100115 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
100116 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
100117+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100118+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
100119+#endif
100120 #endif
100121 seq_putc(m, '\n');
100122 }
100123diff --git a/mm/slob.c b/mm/slob.c
100124index 21980e0..975f1bf 100644
100125--- a/mm/slob.c
100126+++ b/mm/slob.c
100127@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
100128 /*
100129 * Return the size of a slob block.
100130 */
100131-static slobidx_t slob_units(slob_t *s)
100132+static slobidx_t slob_units(const slob_t *s)
100133 {
100134 if (s->units > 0)
100135 return s->units;
100136@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
100137 /*
100138 * Return the next free slob block pointer after this one.
100139 */
100140-static slob_t *slob_next(slob_t *s)
100141+static slob_t *slob_next(const slob_t *s)
100142 {
100143 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
100144 slobidx_t next;
100145@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
100146 /*
100147 * Returns true if s is the last free block in its page.
100148 */
100149-static int slob_last(slob_t *s)
100150+static int slob_last(const slob_t *s)
100151 {
100152 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
100153 }
100154
100155-static void *slob_new_pages(gfp_t gfp, int order, int node)
100156+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
100157 {
100158- void *page;
100159+ struct page *page;
100160
100161 #ifdef CONFIG_NUMA
100162 if (node != NUMA_NO_NODE)
100163@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
100164 if (!page)
100165 return NULL;
100166
100167- return page_address(page);
100168+ __SetPageSlab(page);
100169+ return page;
100170 }
100171
100172-static void slob_free_pages(void *b, int order)
100173+static void slob_free_pages(struct page *sp, int order)
100174 {
100175 if (current->reclaim_state)
100176 current->reclaim_state->reclaimed_slab += 1 << order;
100177- free_pages((unsigned long)b, order);
100178+ __ClearPageSlab(sp);
100179+ page_mapcount_reset(sp);
100180+ sp->private = 0;
100181+ __free_pages(sp, order);
100182 }
100183
100184 /*
100185@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100186
100187 /* Not enough space: must allocate a new page */
100188 if (!b) {
100189- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100190- if (!b)
100191+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100192+ if (!sp)
100193 return NULL;
100194- sp = virt_to_page(b);
100195- __SetPageSlab(sp);
100196+ b = page_address(sp);
100197
100198 spin_lock_irqsave(&slob_lock, flags);
100199 sp->units = SLOB_UNITS(PAGE_SIZE);
100200 sp->freelist = b;
100201+ sp->private = 0;
100202 INIT_LIST_HEAD(&sp->lru);
100203 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
100204 set_slob_page_free(sp, slob_list);
100205@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100206 /*
100207 * slob_free: entry point into the slob allocator.
100208 */
100209-static void slob_free(void *block, int size)
100210+static void slob_free(struct kmem_cache *c, void *block, int size)
100211 {
100212 struct page *sp;
100213 slob_t *prev, *next, *b = (slob_t *)block;
100214@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
100215 if (slob_page_free(sp))
100216 clear_slob_page_free(sp);
100217 spin_unlock_irqrestore(&slob_lock, flags);
100218- __ClearPageSlab(sp);
100219- page_mapcount_reset(sp);
100220- slob_free_pages(b, 0);
100221+ slob_free_pages(sp, 0);
100222 return;
100223 }
100224
100225+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100226+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
100227+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
100228+#endif
100229+
100230 if (!slob_page_free(sp)) {
100231 /* This slob page is about to become partially free. Easy! */
100232 sp->units = units;
100233@@ -424,11 +431,10 @@ out:
100234 */
100235
100236 static __always_inline void *
100237-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100238+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
100239 {
100240- unsigned int *m;
100241- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100242- void *ret;
100243+ slob_t *m;
100244+ void *ret = NULL;
100245
100246 gfp &= gfp_allowed_mask;
100247
100248@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100249
100250 if (!m)
100251 return NULL;
100252- *m = size;
100253+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
100254+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
100255+ m[0].units = size;
100256+ m[1].units = align;
100257 ret = (void *)m + align;
100258
100259 trace_kmalloc_node(caller, ret,
100260 size, size + align, gfp, node);
100261 } else {
100262 unsigned int order = get_order(size);
100263+ struct page *page;
100264
100265 if (likely(order))
100266 gfp |= __GFP_COMP;
100267- ret = slob_new_pages(gfp, order, node);
100268+ page = slob_new_pages(gfp, order, node);
100269+ if (page) {
100270+ ret = page_address(page);
100271+ page->private = size;
100272+ }
100273
100274 trace_kmalloc_node(caller, ret,
100275 size, PAGE_SIZE << order, gfp, node);
100276 }
100277
100278- kmemleak_alloc(ret, size, 1, gfp);
100279+ return ret;
100280+}
100281+
100282+static __always_inline void *
100283+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100284+{
100285+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100286+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
100287+
100288+ if (!ZERO_OR_NULL_PTR(ret))
100289+ kmemleak_alloc(ret, size, 1, gfp);
100290 return ret;
100291 }
100292
100293@@ -493,34 +517,112 @@ void kfree(const void *block)
100294 return;
100295 kmemleak_free(block);
100296
100297+ VM_BUG_ON(!virt_addr_valid(block));
100298 sp = virt_to_page(block);
100299- if (PageSlab(sp)) {
100300+ VM_BUG_ON(!PageSlab(sp));
100301+ if (!sp->private) {
100302 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100303- unsigned int *m = (unsigned int *)(block - align);
100304- slob_free(m, *m + align);
100305- } else
100306+ slob_t *m = (slob_t *)(block - align);
100307+ slob_free(NULL, m, m[0].units + align);
100308+ } else {
100309+ __ClearPageSlab(sp);
100310+ page_mapcount_reset(sp);
100311+ sp->private = 0;
100312 __free_pages(sp, compound_order(sp));
100313+ }
100314 }
100315 EXPORT_SYMBOL(kfree);
100316
100317+bool is_usercopy_object(const void *ptr)
100318+{
100319+ if (!slab_is_available())
100320+ return false;
100321+
100322+ // PAX: TODO
100323+
100324+ return false;
100325+}
100326+
100327+#ifdef CONFIG_PAX_USERCOPY
100328+const char *check_heap_object(const void *ptr, unsigned long n)
100329+{
100330+ struct page *page;
100331+ const slob_t *free;
100332+ const void *base;
100333+ unsigned long flags;
100334+
100335+ if (ZERO_OR_NULL_PTR(ptr))
100336+ return "<null>";
100337+
100338+ if (!virt_addr_valid(ptr))
100339+ return NULL;
100340+
100341+ page = virt_to_head_page(ptr);
100342+ if (!PageSlab(page))
100343+ return NULL;
100344+
100345+ if (page->private) {
100346+ base = page;
100347+ if (base <= ptr && n <= page->private - (ptr - base))
100348+ return NULL;
100349+ return "<slob>";
100350+ }
100351+
100352+ /* some tricky double walking to find the chunk */
100353+ spin_lock_irqsave(&slob_lock, flags);
100354+ base = (void *)((unsigned long)ptr & PAGE_MASK);
100355+ free = page->freelist;
100356+
100357+ while (!slob_last(free) && (void *)free <= ptr) {
100358+ base = free + slob_units(free);
100359+ free = slob_next(free);
100360+ }
100361+
100362+ while (base < (void *)free) {
100363+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
100364+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
100365+ int offset;
100366+
100367+ if (ptr < base + align)
100368+ break;
100369+
100370+ offset = ptr - base - align;
100371+ if (offset >= m) {
100372+ base += size;
100373+ continue;
100374+ }
100375+
100376+ if (n > m - offset)
100377+ break;
100378+
100379+ spin_unlock_irqrestore(&slob_lock, flags);
100380+ return NULL;
100381+ }
100382+
100383+ spin_unlock_irqrestore(&slob_lock, flags);
100384+ return "<slob>";
100385+}
100386+#endif
100387+
100388 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
100389 size_t ksize(const void *block)
100390 {
100391 struct page *sp;
100392 int align;
100393- unsigned int *m;
100394+ slob_t *m;
100395
100396 BUG_ON(!block);
100397 if (unlikely(block == ZERO_SIZE_PTR))
100398 return 0;
100399
100400 sp = virt_to_page(block);
100401- if (unlikely(!PageSlab(sp)))
100402- return PAGE_SIZE << compound_order(sp);
100403+ VM_BUG_ON(!PageSlab(sp));
100404+ if (sp->private)
100405+ return sp->private;
100406
100407 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100408- m = (unsigned int *)(block - align);
100409- return SLOB_UNITS(*m) * SLOB_UNIT;
100410+ m = (slob_t *)(block - align);
100411+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
100412 }
100413 EXPORT_SYMBOL(ksize);
100414
100415@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
100416
100417 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
100418 {
100419- void *b;
100420+ void *b = NULL;
100421
100422 flags &= gfp_allowed_mask;
100423
100424 lockdep_trace_alloc(flags);
100425
100426+#ifdef CONFIG_PAX_USERCOPY_SLABS
100427+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
100428+#else
100429 if (c->size < PAGE_SIZE) {
100430 b = slob_alloc(c->size, flags, c->align, node);
100431 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100432 SLOB_UNITS(c->size) * SLOB_UNIT,
100433 flags, node);
100434 } else {
100435- b = slob_new_pages(flags, get_order(c->size), node);
100436+ struct page *sp;
100437+
100438+ sp = slob_new_pages(flags, get_order(c->size), node);
100439+ if (sp) {
100440+ b = page_address(sp);
100441+ sp->private = c->size;
100442+ }
100443 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100444 PAGE_SIZE << get_order(c->size),
100445 flags, node);
100446 }
100447+#endif
100448
100449 if (b && c->ctor)
100450 c->ctor(b);
100451@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
100452 EXPORT_SYMBOL(kmem_cache_alloc_node);
100453 #endif
100454
100455-static void __kmem_cache_free(void *b, int size)
100456+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
100457 {
100458- if (size < PAGE_SIZE)
100459- slob_free(b, size);
100460+ struct page *sp;
100461+
100462+ sp = virt_to_page(b);
100463+ BUG_ON(!PageSlab(sp));
100464+ if (!sp->private)
100465+ slob_free(c, b, size);
100466 else
100467- slob_free_pages(b, get_order(size));
100468+ slob_free_pages(sp, get_order(size));
100469 }
100470
100471 static void kmem_rcu_free(struct rcu_head *head)
100472@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
100473 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
100474 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
100475
100476- __kmem_cache_free(b, slob_rcu->size);
100477+ __kmem_cache_free(NULL, b, slob_rcu->size);
100478 }
100479
100480 void kmem_cache_free(struct kmem_cache *c, void *b)
100481 {
100482+ int size = c->size;
100483+
100484+#ifdef CONFIG_PAX_USERCOPY_SLABS
100485+ if (size + c->align < PAGE_SIZE) {
100486+ size += c->align;
100487+ b -= c->align;
100488+ }
100489+#endif
100490+
100491 kmemleak_free_recursive(b, c->flags);
100492 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
100493 struct slob_rcu *slob_rcu;
100494- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
100495- slob_rcu->size = c->size;
100496+ slob_rcu = b + (size - sizeof(struct slob_rcu));
100497+ slob_rcu->size = size;
100498 call_rcu(&slob_rcu->head, kmem_rcu_free);
100499 } else {
100500- __kmem_cache_free(b, c->size);
100501+ __kmem_cache_free(c, b, size);
100502 }
100503
100504+#ifdef CONFIG_PAX_USERCOPY_SLABS
100505+ trace_kfree(_RET_IP_, b);
100506+#else
100507 trace_kmem_cache_free(_RET_IP_, b);
100508+#endif
100509+
100510 }
100511 EXPORT_SYMBOL(kmem_cache_free);
100512
100513diff --git a/mm/slub.c b/mm/slub.c
100514index 3e8afcc..d6e2c89 100644
100515--- a/mm/slub.c
100516+++ b/mm/slub.c
100517@@ -207,7 +207,7 @@ struct track {
100518
100519 enum track_item { TRACK_ALLOC, TRACK_FREE };
100520
100521-#ifdef CONFIG_SYSFS
100522+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100523 static int sysfs_slab_add(struct kmem_cache *);
100524 static int sysfs_slab_alias(struct kmem_cache *, const char *);
100525 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
100526@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t)
100527 if (!t->addr)
100528 return;
100529
100530- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
100531+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
100532 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
100533 #ifdef CONFIG_STACKTRACE
100534 {
100535@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
100536
100537 slab_free_hook(s, x);
100538
100539+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100540+ if (!(s->flags & SLAB_NO_SANITIZE)) {
100541+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
100542+ if (s->ctor)
100543+ s->ctor(x);
100544+ }
100545+#endif
100546+
100547 redo:
100548 /*
100549 * Determine the currently cpus per cpu slab.
100550@@ -2710,7 +2718,7 @@ static int slub_min_objects;
100551 * Merge control. If this is set then no merging of slab caches will occur.
100552 * (Could be removed. This was introduced to pacify the merge skeptics.)
100553 */
100554-static int slub_nomerge;
100555+static int slub_nomerge = 1;
100556
100557 /*
100558 * Calculate the order of allocation given an slab object size.
100559@@ -2986,6 +2994,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
100560 s->inuse = size;
100561
100562 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
100563+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100564+ (!(flags & SLAB_NO_SANITIZE)) ||
100565+#endif
100566 s->ctor)) {
100567 /*
100568 * Relocate free pointer after the object if it is not
100569@@ -3313,6 +3324,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
100570 EXPORT_SYMBOL(__kmalloc_node);
100571 #endif
100572
100573+bool is_usercopy_object(const void *ptr)
100574+{
100575+ struct page *page;
100576+ struct kmem_cache *s;
100577+
100578+ if (ZERO_OR_NULL_PTR(ptr))
100579+ return false;
100580+
100581+ if (!slab_is_available())
100582+ return false;
100583+
100584+ if (!virt_addr_valid(ptr))
100585+ return false;
100586+
100587+ page = virt_to_head_page(ptr);
100588+
100589+ if (!PageSlab(page))
100590+ return false;
100591+
100592+ s = page->slab_cache;
100593+ return s->flags & SLAB_USERCOPY;
100594+}
100595+
100596+#ifdef CONFIG_PAX_USERCOPY
100597+const char *check_heap_object(const void *ptr, unsigned long n)
100598+{
100599+ struct page *page;
100600+ struct kmem_cache *s;
100601+ unsigned long offset;
100602+
100603+ if (ZERO_OR_NULL_PTR(ptr))
100604+ return "<null>";
100605+
100606+ if (!virt_addr_valid(ptr))
100607+ return NULL;
100608+
100609+ page = virt_to_head_page(ptr);
100610+
100611+ if (!PageSlab(page))
100612+ return NULL;
100613+
100614+ s = page->slab_cache;
100615+ if (!(s->flags & SLAB_USERCOPY))
100616+ return s->name;
100617+
100618+ offset = (ptr - page_address(page)) % s->size;
100619+ if (offset <= s->object_size && n <= s->object_size - offset)
100620+ return NULL;
100621+
100622+ return s->name;
100623+}
100624+#endif
100625+
100626 size_t ksize(const void *object)
100627 {
100628 struct page *page;
100629@@ -3341,6 +3405,7 @@ void kfree(const void *x)
100630 if (unlikely(ZERO_OR_NULL_PTR(x)))
100631 return;
100632
100633+ VM_BUG_ON(!virt_addr_valid(x));
100634 page = virt_to_head_page(x);
100635 if (unlikely(!PageSlab(page))) {
100636 BUG_ON(!PageCompound(page));
100637@@ -3642,7 +3707,7 @@ static int slab_unmergeable(struct kmem_cache *s)
100638 /*
100639 * We may have set a slab to be unmergeable during bootstrap.
100640 */
100641- if (s->refcount < 0)
100642+ if (atomic_read(&s->refcount) < 0)
100643 return 1;
100644
100645 return 0;
100646@@ -3699,7 +3764,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100647 int i;
100648 struct kmem_cache *c;
100649
100650- s->refcount++;
100651+ atomic_inc(&s->refcount);
100652
100653 /*
100654 * Adjust the object sizes so that we clear
100655@@ -3718,7 +3783,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100656 }
100657
100658 if (sysfs_slab_alias(s, name)) {
100659- s->refcount--;
100660+ atomic_dec(&s->refcount);
100661 s = NULL;
100662 }
100663 }
100664@@ -3835,7 +3900,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100665 }
100666 #endif
100667
100668-#ifdef CONFIG_SYSFS
100669+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100670 static int count_inuse(struct page *page)
100671 {
100672 return page->inuse;
100673@@ -4116,7 +4181,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100674 len += sprintf(buf + len, "%7ld ", l->count);
100675
100676 if (l->addr)
100677+#ifdef CONFIG_GRKERNSEC_HIDESYM
100678+ len += sprintf(buf + len, "%pS", NULL);
100679+#else
100680 len += sprintf(buf + len, "%pS", (void *)l->addr);
100681+#endif
100682 else
100683 len += sprintf(buf + len, "<not-available>");
100684
100685@@ -4218,12 +4287,12 @@ static void __init resiliency_test(void)
100686 validate_slab_cache(kmalloc_caches[9]);
100687 }
100688 #else
100689-#ifdef CONFIG_SYSFS
100690+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100691 static void resiliency_test(void) {};
100692 #endif
100693 #endif
100694
100695-#ifdef CONFIG_SYSFS
100696+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100697 enum slab_stat_type {
100698 SL_ALL, /* All slabs */
100699 SL_PARTIAL, /* Only partially allocated slabs */
100700@@ -4460,13 +4529,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100701 {
100702 if (!s->ctor)
100703 return 0;
100704+#ifdef CONFIG_GRKERNSEC_HIDESYM
100705+ return sprintf(buf, "%pS\n", NULL);
100706+#else
100707 return sprintf(buf, "%pS\n", s->ctor);
100708+#endif
100709 }
100710 SLAB_ATTR_RO(ctor);
100711
100712 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100713 {
100714- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100715+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100716 }
100717 SLAB_ATTR_RO(aliases);
100718
100719@@ -4554,6 +4627,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100720 SLAB_ATTR_RO(cache_dma);
100721 #endif
100722
100723+#ifdef CONFIG_PAX_USERCOPY_SLABS
100724+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100725+{
100726+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100727+}
100728+SLAB_ATTR_RO(usercopy);
100729+#endif
100730+
100731+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100732+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100733+{
100734+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100735+}
100736+SLAB_ATTR_RO(sanitize);
100737+#endif
100738+
100739 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100740 {
100741 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100742@@ -4888,6 +4977,12 @@ static struct attribute *slab_attrs[] = {
100743 #ifdef CONFIG_ZONE_DMA
100744 &cache_dma_attr.attr,
100745 #endif
100746+#ifdef CONFIG_PAX_USERCOPY_SLABS
100747+ &usercopy_attr.attr,
100748+#endif
100749+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100750+ &sanitize_attr.attr,
100751+#endif
100752 #ifdef CONFIG_NUMA
100753 &remote_node_defrag_ratio_attr.attr,
100754 #endif
100755@@ -5132,6 +5227,7 @@ static char *create_unique_id(struct kmem_cache *s)
100756 return name;
100757 }
100758
100759+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100760 static int sysfs_slab_add(struct kmem_cache *s)
100761 {
100762 int err;
100763@@ -5205,6 +5301,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100764 kobject_del(&s->kobj);
100765 kobject_put(&s->kobj);
100766 }
100767+#endif
100768
100769 /*
100770 * Need to buffer aliases during bootup until sysfs becomes
100771@@ -5218,6 +5315,7 @@ struct saved_alias {
100772
100773 static struct saved_alias *alias_list;
100774
100775+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100776 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100777 {
100778 struct saved_alias *al;
100779@@ -5240,6 +5338,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100780 alias_list = al;
100781 return 0;
100782 }
100783+#endif
100784
100785 static int __init slab_sysfs_init(void)
100786 {
100787diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100788index 4cba9c2..b4f9fcc 100644
100789--- a/mm/sparse-vmemmap.c
100790+++ b/mm/sparse-vmemmap.c
100791@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100792 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100793 if (!p)
100794 return NULL;
100795- pud_populate(&init_mm, pud, p);
100796+ pud_populate_kernel(&init_mm, pud, p);
100797 }
100798 return pud;
100799 }
100800@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100801 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100802 if (!p)
100803 return NULL;
100804- pgd_populate(&init_mm, pgd, p);
100805+ pgd_populate_kernel(&init_mm, pgd, p);
100806 }
100807 return pgd;
100808 }
100809diff --git a/mm/sparse.c b/mm/sparse.c
100810index d1b48b6..6e8590e 100644
100811--- a/mm/sparse.c
100812+++ b/mm/sparse.c
100813@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100814
100815 for (i = 0; i < PAGES_PER_SECTION; i++) {
100816 if (PageHWPoison(&memmap[i])) {
100817- atomic_long_sub(1, &num_poisoned_pages);
100818+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100819 ClearPageHWPoison(&memmap[i]);
100820 }
100821 }
100822diff --git a/mm/swap.c b/mm/swap.c
100823index 6b2dc38..46b79ba 100644
100824--- a/mm/swap.c
100825+++ b/mm/swap.c
100826@@ -31,6 +31,7 @@
100827 #include <linux/memcontrol.h>
100828 #include <linux/gfp.h>
100829 #include <linux/uio.h>
100830+#include <linux/hugetlb.h>
100831
100832 #include "internal.h"
100833
100834@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100835
100836 __page_cache_release(page);
100837 dtor = get_compound_page_dtor(page);
100838+ if (!PageHuge(page))
100839+ BUG_ON(dtor != free_compound_page);
100840 (*dtor)(page);
100841 }
100842
100843diff --git a/mm/swapfile.c b/mm/swapfile.c
100844index 8798b2e..348f9dd 100644
100845--- a/mm/swapfile.c
100846+++ b/mm/swapfile.c
100847@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100848
100849 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100850 /* Activity counter to indicate that a swapon or swapoff has occurred */
100851-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100852+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100853
100854 static inline unsigned char swap_count(unsigned char ent)
100855 {
100856@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100857 spin_unlock(&swap_lock);
100858
100859 err = 0;
100860- atomic_inc(&proc_poll_event);
100861+ atomic_inc_unchecked(&proc_poll_event);
100862 wake_up_interruptible(&proc_poll_wait);
100863
100864 out_dput:
100865@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100866
100867 poll_wait(file, &proc_poll_wait, wait);
100868
100869- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100870- seq->poll_event = atomic_read(&proc_poll_event);
100871+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100872+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100873 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100874 }
100875
100876@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100877 return ret;
100878
100879 seq = file->private_data;
100880- seq->poll_event = atomic_read(&proc_poll_event);
100881+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100882 return 0;
100883 }
100884
100885@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100886 (frontswap_map) ? "FS" : "");
100887
100888 mutex_unlock(&swapon_mutex);
100889- atomic_inc(&proc_poll_event);
100890+ atomic_inc_unchecked(&proc_poll_event);
100891 wake_up_interruptible(&proc_poll_wait);
100892
100893 if (S_ISREG(inode->i_mode))
100894diff --git a/mm/util.c b/mm/util.c
100895index 093c973..b70a268 100644
100896--- a/mm/util.c
100897+++ b/mm/util.c
100898@@ -202,6 +202,12 @@ done:
100899 void arch_pick_mmap_layout(struct mm_struct *mm)
100900 {
100901 mm->mmap_base = TASK_UNMAPPED_BASE;
100902+
100903+#ifdef CONFIG_PAX_RANDMMAP
100904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100905+ mm->mmap_base += mm->delta_mmap;
100906+#endif
100907+
100908 mm->get_unmapped_area = arch_get_unmapped_area;
100909 }
100910 #endif
100911@@ -378,6 +384,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100912 if (!mm->arg_end)
100913 goto out_mm; /* Shh! No looking before we're done */
100914
100915+ if (gr_acl_handle_procpidmem(task))
100916+ goto out_mm;
100917+
100918 len = mm->arg_end - mm->arg_start;
100919
100920 if (len > buflen)
100921diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100922index 2b0aa54..b451f74 100644
100923--- a/mm/vmalloc.c
100924+++ b/mm/vmalloc.c
100925@@ -40,6 +40,21 @@ struct vfree_deferred {
100926 };
100927 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100928
100929+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100930+struct stack_deferred_llist {
100931+ struct llist_head list;
100932+ void *stack;
100933+ void *lowmem_stack;
100934+};
100935+
100936+struct stack_deferred {
100937+ struct stack_deferred_llist list;
100938+ struct work_struct wq;
100939+};
100940+
100941+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100942+#endif
100943+
100944 static void __vunmap(const void *, int);
100945
100946 static void free_work(struct work_struct *w)
100947@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
100948 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100949 struct llist_node *llnode = llist_del_all(&p->list);
100950 while (llnode) {
100951- void *p = llnode;
100952+ void *x = llnode;
100953 llnode = llist_next(llnode);
100954- __vunmap(p, 1);
100955+ __vunmap(x, 1);
100956 }
100957 }
100958
100959+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100960+static void unmap_work(struct work_struct *w)
100961+{
100962+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100963+ struct llist_node *llnode = llist_del_all(&p->list.list);
100964+ while (llnode) {
100965+ struct stack_deferred_llist *x =
100966+ llist_entry((struct llist_head *)llnode,
100967+ struct stack_deferred_llist, list);
100968+ void *stack = ACCESS_ONCE(x->stack);
100969+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100970+ llnode = llist_next(llnode);
100971+ __vunmap(stack, 0);
100972+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100973+ }
100974+}
100975+#endif
100976+
100977 /*** Page table manipulation functions ***/
100978
100979 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100980@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100981
100982 pte = pte_offset_kernel(pmd, addr);
100983 do {
100984- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100985- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100986+
100987+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100988+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100989+ BUG_ON(!pte_exec(*pte));
100990+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100991+ continue;
100992+ }
100993+#endif
100994+
100995+ {
100996+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100997+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100998+ }
100999 } while (pte++, addr += PAGE_SIZE, addr != end);
101000 }
101001
101002@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
101003 pte = pte_alloc_kernel(pmd, addr);
101004 if (!pte)
101005 return -ENOMEM;
101006+
101007+ pax_open_kernel();
101008 do {
101009 struct page *page = pages[*nr];
101010
101011- if (WARN_ON(!pte_none(*pte)))
101012+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101013+ if (pgprot_val(prot) & _PAGE_NX)
101014+#endif
101015+
101016+ if (!pte_none(*pte)) {
101017+ pax_close_kernel();
101018+ WARN_ON(1);
101019 return -EBUSY;
101020- if (WARN_ON(!page))
101021+ }
101022+ if (!page) {
101023+ pax_close_kernel();
101024+ WARN_ON(1);
101025 return -ENOMEM;
101026+ }
101027 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
101028 (*nr)++;
101029 } while (pte++, addr += PAGE_SIZE, addr != end);
101030+ pax_close_kernel();
101031 return 0;
101032 }
101033
101034@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
101035 pmd_t *pmd;
101036 unsigned long next;
101037
101038- pmd = pmd_alloc(&init_mm, pud, addr);
101039+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
101040 if (!pmd)
101041 return -ENOMEM;
101042 do {
101043@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
101044 pud_t *pud;
101045 unsigned long next;
101046
101047- pud = pud_alloc(&init_mm, pgd, addr);
101048+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
101049 if (!pud)
101050 return -ENOMEM;
101051 do {
101052@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
101053 if (addr >= MODULES_VADDR && addr < MODULES_END)
101054 return 1;
101055 #endif
101056+
101057+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101058+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
101059+ return 1;
101060+#endif
101061+
101062 return is_vmalloc_addr(x);
101063 }
101064
101065@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
101066
101067 if (!pgd_none(*pgd)) {
101068 pud_t *pud = pud_offset(pgd, addr);
101069+#ifdef CONFIG_X86
101070+ if (!pud_large(*pud))
101071+#endif
101072 if (!pud_none(*pud)) {
101073 pmd_t *pmd = pmd_offset(pud, addr);
101074+#ifdef CONFIG_X86
101075+ if (!pmd_large(*pmd))
101076+#endif
101077 if (!pmd_none(*pmd)) {
101078 pte_t *ptep, pte;
101079
101080@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
101081 for_each_possible_cpu(i) {
101082 struct vmap_block_queue *vbq;
101083 struct vfree_deferred *p;
101084+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101085+ struct stack_deferred *p2;
101086+#endif
101087
101088 vbq = &per_cpu(vmap_block_queue, i);
101089 spin_lock_init(&vbq->lock);
101090 INIT_LIST_HEAD(&vbq->free);
101091+
101092 p = &per_cpu(vfree_deferred, i);
101093 init_llist_head(&p->list);
101094 INIT_WORK(&p->wq, free_work);
101095+
101096+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101097+ p2 = &per_cpu(stack_deferred, i);
101098+ init_llist_head(&p2->list.list);
101099+ INIT_WORK(&p2->wq, unmap_work);
101100+#endif
101101 }
101102
101103 /* Import existing vmlist entries. */
101104@@ -1314,6 +1393,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
101105 struct vm_struct *area;
101106
101107 BUG_ON(in_interrupt());
101108+
101109+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101110+ if (flags & VM_KERNEXEC) {
101111+ if (start != VMALLOC_START || end != VMALLOC_END)
101112+ return NULL;
101113+ start = (unsigned long)MODULES_EXEC_VADDR;
101114+ end = (unsigned long)MODULES_EXEC_END;
101115+ }
101116+#endif
101117+
101118 if (flags & VM_IOREMAP)
101119 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
101120
101121@@ -1519,6 +1608,23 @@ void vunmap(const void *addr)
101122 }
101123 EXPORT_SYMBOL(vunmap);
101124
101125+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101126+void unmap_process_stacks(struct task_struct *task)
101127+{
101128+ if (unlikely(in_interrupt())) {
101129+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
101130+ struct stack_deferred_llist *list = task->stack;
101131+ list->stack = task->stack;
101132+ list->lowmem_stack = task->lowmem_stack;
101133+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
101134+ schedule_work(&p->wq);
101135+ } else {
101136+ __vunmap(task->stack, 0);
101137+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
101138+ }
101139+}
101140+#endif
101141+
101142 /**
101143 * vmap - map an array of pages into virtually contiguous space
101144 * @pages: array of page pointers
101145@@ -1539,6 +1645,11 @@ void *vmap(struct page **pages, unsigned int count,
101146 if (count > totalram_pages)
101147 return NULL;
101148
101149+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101150+ if (!(pgprot_val(prot) & _PAGE_NX))
101151+ flags |= VM_KERNEXEC;
101152+#endif
101153+
101154 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
101155 __builtin_return_address(0));
101156 if (!area)
101157@@ -1641,6 +1752,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
101158 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
101159 goto fail;
101160
101161+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101162+ if (!(pgprot_val(prot) & _PAGE_NX))
101163+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
101164+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
101165+ else
101166+#endif
101167+
101168 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
101169 start, end, node, gfp_mask, caller);
101170 if (!area)
101171@@ -1817,10 +1935,9 @@ EXPORT_SYMBOL(vzalloc_node);
101172 * For tight control over page level allocator and protection flags
101173 * use __vmalloc() instead.
101174 */
101175-
101176 void *vmalloc_exec(unsigned long size)
101177 {
101178- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
101179+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
101180 NUMA_NO_NODE, __builtin_return_address(0));
101181 }
101182
101183@@ -2127,6 +2244,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
101184 {
101185 struct vm_struct *area;
101186
101187+ BUG_ON(vma->vm_mirror);
101188+
101189 size = PAGE_ALIGN(size);
101190
101191 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
101192@@ -2609,7 +2728,11 @@ static int s_show(struct seq_file *m, void *p)
101193 v->addr, v->addr + v->size, v->size);
101194
101195 if (v->caller)
101196+#ifdef CONFIG_GRKERNSEC_HIDESYM
101197+ seq_printf(m, " %pK", v->caller);
101198+#else
101199 seq_printf(m, " %pS", v->caller);
101200+#endif
101201
101202 if (v->nr_pages)
101203 seq_printf(m, " pages=%d", v->nr_pages);
101204diff --git a/mm/vmstat.c b/mm/vmstat.c
101205index e9ab104..de275bd 100644
101206--- a/mm/vmstat.c
101207+++ b/mm/vmstat.c
101208@@ -20,6 +20,7 @@
101209 #include <linux/writeback.h>
101210 #include <linux/compaction.h>
101211 #include <linux/mm_inline.h>
101212+#include <linux/grsecurity.h>
101213
101214 #include "internal.h"
101215
101216@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
101217 *
101218 * vm_stat contains the global counters
101219 */
101220-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101221+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101222 EXPORT_SYMBOL(vm_stat);
101223
101224 #ifdef CONFIG_SMP
101225@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
101226
101227 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101228 if (diff[i])
101229- atomic_long_add(diff[i], &vm_stat[i]);
101230+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
101231 }
101232
101233 /*
101234@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
101235 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
101236 if (v) {
101237
101238- atomic_long_add(v, &zone->vm_stat[i]);
101239+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101240 global_diff[i] += v;
101241 #ifdef CONFIG_NUMA
101242 /* 3 seconds idle till flush */
101243@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
101244
101245 v = p->vm_stat_diff[i];
101246 p->vm_stat_diff[i] = 0;
101247- atomic_long_add(v, &zone->vm_stat[i]);
101248+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101249 global_diff[i] += v;
101250 }
101251 }
101252@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
101253 if (pset->vm_stat_diff[i]) {
101254 int v = pset->vm_stat_diff[i];
101255 pset->vm_stat_diff[i] = 0;
101256- atomic_long_add(v, &zone->vm_stat[i]);
101257- atomic_long_add(v, &vm_stat[i]);
101258+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101259+ atomic_long_add_unchecked(v, &vm_stat[i]);
101260 }
101261 }
101262 #endif
101263@@ -1163,10 +1164,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
101264 stat_items_size += sizeof(struct vm_event_state);
101265 #endif
101266
101267- v = kmalloc(stat_items_size, GFP_KERNEL);
101268+ v = kzalloc(stat_items_size, GFP_KERNEL);
101269 m->private = v;
101270 if (!v)
101271 return ERR_PTR(-ENOMEM);
101272+
101273+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101274+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101275+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
101276+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
101277+ && !in_group_p(grsec_proc_gid)
101278+#endif
101279+ )
101280+ return (unsigned long *)m->private + *pos;
101281+#endif
101282+#endif
101283+
101284 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101285 v[i] = global_page_state(i);
101286 v += NR_VM_ZONE_STAT_ITEMS;
101287@@ -1315,10 +1328,16 @@ static int __init setup_vmstat(void)
101288 cpu_notifier_register_done();
101289 #endif
101290 #ifdef CONFIG_PROC_FS
101291- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
101292- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
101293- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101294- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
101295+ {
101296+ mode_t gr_mode = S_IRUGO;
101297+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101298+ gr_mode = S_IRUSR;
101299+#endif
101300+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
101301+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
101302+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101303+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
101304+ }
101305 #endif
101306 return 0;
101307 }
101308diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
101309index 64c6bed..b79a5de 100644
101310--- a/net/8021q/vlan.c
101311+++ b/net/8021q/vlan.c
101312@@ -481,7 +481,7 @@ out:
101313 return NOTIFY_DONE;
101314 }
101315
101316-static struct notifier_block vlan_notifier_block __read_mostly = {
101317+static struct notifier_block vlan_notifier_block = {
101318 .notifier_call = vlan_device_event,
101319 };
101320
101321@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
101322 err = -EPERM;
101323 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
101324 break;
101325- if ((args.u.name_type >= 0) &&
101326- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
101327+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
101328 struct vlan_net *vn;
101329
101330 vn = net_generic(net, vlan_net_id);
101331diff --git a/net/9p/client.c b/net/9p/client.c
101332index e86a9bea..e91f70e 100644
101333--- a/net/9p/client.c
101334+++ b/net/9p/client.c
101335@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
101336 len - inline_len);
101337 } else {
101338 err = copy_from_user(ename + inline_len,
101339- uidata, len - inline_len);
101340+ (char __force_user *)uidata, len - inline_len);
101341 if (err) {
101342 err = -EFAULT;
101343 goto out_err;
101344@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
101345 kernel_buf = 1;
101346 indata = data;
101347 } else
101348- indata = (__force char *)udata;
101349+ indata = (__force_kernel char *)udata;
101350 /*
101351 * response header len is 11
101352 * PDU Header(7) + IO Size (4)
101353@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
101354 kernel_buf = 1;
101355 odata = data;
101356 } else
101357- odata = (char *)udata;
101358+ odata = (char __force_kernel *)udata;
101359 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
101360 P9_ZC_HDR_SZ, kernel_buf, "dqd",
101361 fid->fid, offset, rsize);
101362diff --git a/net/9p/mod.c b/net/9p/mod.c
101363index 6ab36ae..6f1841b 100644
101364--- a/net/9p/mod.c
101365+++ b/net/9p/mod.c
101366@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
101367 void v9fs_register_trans(struct p9_trans_module *m)
101368 {
101369 spin_lock(&v9fs_trans_lock);
101370- list_add_tail(&m->list, &v9fs_trans_list);
101371+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
101372 spin_unlock(&v9fs_trans_lock);
101373 }
101374 EXPORT_SYMBOL(v9fs_register_trans);
101375@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
101376 void v9fs_unregister_trans(struct p9_trans_module *m)
101377 {
101378 spin_lock(&v9fs_trans_lock);
101379- list_del_init(&m->list);
101380+ pax_list_del_init((struct list_head *)&m->list);
101381 spin_unlock(&v9fs_trans_lock);
101382 }
101383 EXPORT_SYMBOL(v9fs_unregister_trans);
101384diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
101385index 80d08f6..de63fd1 100644
101386--- a/net/9p/trans_fd.c
101387+++ b/net/9p/trans_fd.c
101388@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
101389 oldfs = get_fs();
101390 set_fs(get_ds());
101391 /* The cast to a user pointer is valid due to the set_fs() */
101392- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
101393+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
101394 set_fs(oldfs);
101395
101396 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
101397diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
101398index af46bc4..f9adfcd 100644
101399--- a/net/appletalk/atalk_proc.c
101400+++ b/net/appletalk/atalk_proc.c
101401@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
101402 struct proc_dir_entry *p;
101403 int rc = -ENOMEM;
101404
101405- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
101406+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
101407 if (!atalk_proc_dir)
101408 goto out;
101409
101410diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
101411index 876fbe8..8bbea9f 100644
101412--- a/net/atm/atm_misc.c
101413+++ b/net/atm/atm_misc.c
101414@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
101415 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
101416 return 1;
101417 atm_return(vcc, truesize);
101418- atomic_inc(&vcc->stats->rx_drop);
101419+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101420 return 0;
101421 }
101422 EXPORT_SYMBOL(atm_charge);
101423@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
101424 }
101425 }
101426 atm_return(vcc, guess);
101427- atomic_inc(&vcc->stats->rx_drop);
101428+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101429 return NULL;
101430 }
101431 EXPORT_SYMBOL(atm_alloc_charge);
101432@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
101433
101434 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101435 {
101436-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101437+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101438 __SONET_ITEMS
101439 #undef __HANDLE_ITEM
101440 }
101441@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
101442
101443 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101444 {
101445-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101446+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
101447 __SONET_ITEMS
101448 #undef __HANDLE_ITEM
101449 }
101450diff --git a/net/atm/lec.c b/net/atm/lec.c
101451index 4b98f89..5a2f6cb 100644
101452--- a/net/atm/lec.c
101453+++ b/net/atm/lec.c
101454@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
101455 }
101456
101457 static struct lane2_ops lane2_ops = {
101458- lane2_resolve, /* resolve, spec 3.1.3 */
101459- lane2_associate_req, /* associate_req, spec 3.1.4 */
101460- NULL /* associate indicator, spec 3.1.5 */
101461+ .resolve = lane2_resolve,
101462+ .associate_req = lane2_associate_req,
101463+ .associate_indicator = NULL
101464 };
101465
101466 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
101467diff --git a/net/atm/lec.h b/net/atm/lec.h
101468index 4149db1..f2ab682 100644
101469--- a/net/atm/lec.h
101470+++ b/net/atm/lec.h
101471@@ -48,7 +48,7 @@ struct lane2_ops {
101472 const u8 *tlvs, u32 sizeoftlvs);
101473 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
101474 const u8 *tlvs, u32 sizeoftlvs);
101475-};
101476+} __no_const;
101477
101478 /*
101479 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
101480diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
101481index d1b2d9a..d549f7f 100644
101482--- a/net/atm/mpoa_caches.c
101483+++ b/net/atm/mpoa_caches.c
101484@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
101485
101486
101487 static struct in_cache_ops ingress_ops = {
101488- in_cache_add_entry, /* add_entry */
101489- in_cache_get, /* get */
101490- in_cache_get_with_mask, /* get_with_mask */
101491- in_cache_get_by_vcc, /* get_by_vcc */
101492- in_cache_put, /* put */
101493- in_cache_remove_entry, /* remove_entry */
101494- cache_hit, /* cache_hit */
101495- clear_count_and_expired, /* clear_count */
101496- check_resolving_entries, /* check_resolving */
101497- refresh_entries, /* refresh */
101498- in_destroy_cache /* destroy_cache */
101499+ .add_entry = in_cache_add_entry,
101500+ .get = in_cache_get,
101501+ .get_with_mask = in_cache_get_with_mask,
101502+ .get_by_vcc = in_cache_get_by_vcc,
101503+ .put = in_cache_put,
101504+ .remove_entry = in_cache_remove_entry,
101505+ .cache_hit = cache_hit,
101506+ .clear_count = clear_count_and_expired,
101507+ .check_resolving = check_resolving_entries,
101508+ .refresh = refresh_entries,
101509+ .destroy_cache = in_destroy_cache
101510 };
101511
101512 static struct eg_cache_ops egress_ops = {
101513- eg_cache_add_entry, /* add_entry */
101514- eg_cache_get_by_cache_id, /* get_by_cache_id */
101515- eg_cache_get_by_tag, /* get_by_tag */
101516- eg_cache_get_by_vcc, /* get_by_vcc */
101517- eg_cache_get_by_src_ip, /* get_by_src_ip */
101518- eg_cache_put, /* put */
101519- eg_cache_remove_entry, /* remove_entry */
101520- update_eg_cache_entry, /* update */
101521- clear_expired, /* clear_expired */
101522- eg_destroy_cache /* destroy_cache */
101523+ .add_entry = eg_cache_add_entry,
101524+ .get_by_cache_id = eg_cache_get_by_cache_id,
101525+ .get_by_tag = eg_cache_get_by_tag,
101526+ .get_by_vcc = eg_cache_get_by_vcc,
101527+ .get_by_src_ip = eg_cache_get_by_src_ip,
101528+ .put = eg_cache_put,
101529+ .remove_entry = eg_cache_remove_entry,
101530+ .update = update_eg_cache_entry,
101531+ .clear_expired = clear_expired,
101532+ .destroy_cache = eg_destroy_cache
101533 };
101534
101535
101536diff --git a/net/atm/proc.c b/net/atm/proc.c
101537index bbb6461..cf04016 100644
101538--- a/net/atm/proc.c
101539+++ b/net/atm/proc.c
101540@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
101541 const struct k_atm_aal_stats *stats)
101542 {
101543 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
101544- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
101545- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
101546- atomic_read(&stats->rx_drop));
101547+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
101548+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
101549+ atomic_read_unchecked(&stats->rx_drop));
101550 }
101551
101552 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
101553diff --git a/net/atm/resources.c b/net/atm/resources.c
101554index 0447d5d..3cf4728 100644
101555--- a/net/atm/resources.c
101556+++ b/net/atm/resources.c
101557@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
101558 static void copy_aal_stats(struct k_atm_aal_stats *from,
101559 struct atm_aal_stats *to)
101560 {
101561-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101562+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101563 __AAL_STAT_ITEMS
101564 #undef __HANDLE_ITEM
101565 }
101566@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
101567 static void subtract_aal_stats(struct k_atm_aal_stats *from,
101568 struct atm_aal_stats *to)
101569 {
101570-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101571+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101572 __AAL_STAT_ITEMS
101573 #undef __HANDLE_ITEM
101574 }
101575diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
101576index 919a5ce..cc6b444 100644
101577--- a/net/ax25/sysctl_net_ax25.c
101578+++ b/net/ax25/sysctl_net_ax25.c
101579@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
101580 {
101581 char path[sizeof("net/ax25/") + IFNAMSIZ];
101582 int k;
101583- struct ctl_table *table;
101584+ ctl_table_no_const *table;
101585
101586 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101587 if (!table)
101588diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101589index 1e80539..676c37a 100644
101590--- a/net/batman-adv/bat_iv_ogm.c
101591+++ b/net/batman-adv/bat_iv_ogm.c
101592@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101593
101594 /* randomize initial seqno to avoid collision */
101595 get_random_bytes(&random_seqno, sizeof(random_seqno));
101596- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101597+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101598
101599 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101600 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101601@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101602 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101603
101604 /* change sequence number to network order */
101605- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101606+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101607 batadv_ogm_packet->seqno = htonl(seqno);
101608- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101609+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101610
101611 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101612
101613@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101614 return;
101615
101616 /* could be changed by schedule_own_packet() */
101617- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101618+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101619
101620 if (ogm_packet->flags & BATADV_DIRECTLINK)
101621 has_directlink_flag = true;
101622diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101623index fc1835c..eead856 100644
101624--- a/net/batman-adv/fragmentation.c
101625+++ b/net/batman-adv/fragmentation.c
101626@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101627 frag_header.packet_type = BATADV_UNICAST_FRAG;
101628 frag_header.version = BATADV_COMPAT_VERSION;
101629 frag_header.ttl = BATADV_TTL;
101630- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101631+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101632 frag_header.reserved = 0;
101633 frag_header.no = 0;
101634 frag_header.total_size = htons(skb->len);
101635diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101636index 5467955..30cc771 100644
101637--- a/net/batman-adv/soft-interface.c
101638+++ b/net/batman-adv/soft-interface.c
101639@@ -296,7 +296,7 @@ send:
101640 primary_if->net_dev->dev_addr);
101641
101642 /* set broadcast sequence number */
101643- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101644+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101645 bcast_packet->seqno = htonl(seqno);
101646
101647 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101648@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101649 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101650
101651 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101652- atomic_set(&bat_priv->bcast_seqno, 1);
101653+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101654 atomic_set(&bat_priv->tt.vn, 0);
101655 atomic_set(&bat_priv->tt.local_changes, 0);
101656 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101657@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101658
101659 /* randomize initial seqno to avoid collision */
101660 get_random_bytes(&random_seqno, sizeof(random_seqno));
101661- atomic_set(&bat_priv->frag_seqno, random_seqno);
101662+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101663
101664 bat_priv->primary_if = NULL;
101665 bat_priv->num_ifaces = 0;
101666diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101667index 8854c05..ee5d5497 100644
101668--- a/net/batman-adv/types.h
101669+++ b/net/batman-adv/types.h
101670@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101671 struct batadv_hard_iface_bat_iv {
101672 unsigned char *ogm_buff;
101673 int ogm_buff_len;
101674- atomic_t ogm_seqno;
101675+ atomic_unchecked_t ogm_seqno;
101676 };
101677
101678 /**
101679@@ -768,7 +768,7 @@ struct batadv_priv {
101680 atomic_t bonding;
101681 atomic_t fragmentation;
101682 atomic_t packet_size_max;
101683- atomic_t frag_seqno;
101684+ atomic_unchecked_t frag_seqno;
101685 #ifdef CONFIG_BATMAN_ADV_BLA
101686 atomic_t bridge_loop_avoidance;
101687 #endif
101688@@ -787,7 +787,7 @@ struct batadv_priv {
101689 #endif
101690 uint32_t isolation_mark;
101691 uint32_t isolation_mark_mask;
101692- atomic_t bcast_seqno;
101693+ atomic_unchecked_t bcast_seqno;
101694 atomic_t bcast_queue_left;
101695 atomic_t batman_queue_left;
101696 char num_ifaces;
101697diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101698index 115f149..f0ba286 100644
101699--- a/net/bluetooth/hci_sock.c
101700+++ b/net/bluetooth/hci_sock.c
101701@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101702 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101703 }
101704
101705- len = min_t(unsigned int, len, sizeof(uf));
101706+ len = min((size_t)len, sizeof(uf));
101707 if (copy_from_user(&uf, optval, len)) {
101708 err = -EFAULT;
101709 break;
101710diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101711index 14ca8ae..262d49a 100644
101712--- a/net/bluetooth/l2cap_core.c
101713+++ b/net/bluetooth/l2cap_core.c
101714@@ -3565,8 +3565,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101715 break;
101716
101717 case L2CAP_CONF_RFC:
101718- if (olen == sizeof(rfc))
101719- memcpy(&rfc, (void *)val, olen);
101720+ if (olen != sizeof(rfc))
101721+ break;
101722+
101723+ memcpy(&rfc, (void *)val, olen);
101724
101725 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101726 rfc.mode != chan->mode)
101727diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101728index 1884f72..b3b71f9 100644
101729--- a/net/bluetooth/l2cap_sock.c
101730+++ b/net/bluetooth/l2cap_sock.c
101731@@ -629,7 +629,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101732 struct sock *sk = sock->sk;
101733 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101734 struct l2cap_options opts;
101735- int len, err = 0;
101736+ int err = 0;
101737+ size_t len = optlen;
101738 u32 opt;
101739
101740 BT_DBG("sk %p", sk);
101741@@ -656,7 +657,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101742 opts.max_tx = chan->max_tx;
101743 opts.txwin_size = chan->tx_win;
101744
101745- len = min_t(unsigned int, sizeof(opts), optlen);
101746+ len = min(sizeof(opts), len);
101747 if (copy_from_user((char *) &opts, optval, len)) {
101748 err = -EFAULT;
101749 break;
101750@@ -743,7 +744,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101751 struct bt_security sec;
101752 struct bt_power pwr;
101753 struct l2cap_conn *conn;
101754- int len, err = 0;
101755+ int err = 0;
101756+ size_t len = optlen;
101757 u32 opt;
101758
101759 BT_DBG("sk %p", sk);
101760@@ -767,7 +769,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101761
101762 sec.level = BT_SECURITY_LOW;
101763
101764- len = min_t(unsigned int, sizeof(sec), optlen);
101765+ len = min(sizeof(sec), len);
101766 if (copy_from_user((char *) &sec, optval, len)) {
101767 err = -EFAULT;
101768 break;
101769@@ -862,7 +864,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101770
101771 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101772
101773- len = min_t(unsigned int, sizeof(pwr), optlen);
101774+ len = min(sizeof(pwr), len);
101775 if (copy_from_user((char *) &pwr, optval, len)) {
101776 err = -EFAULT;
101777 break;
101778diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101779index 8bbbb5e..6fc0950 100644
101780--- a/net/bluetooth/rfcomm/sock.c
101781+++ b/net/bluetooth/rfcomm/sock.c
101782@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101783 struct sock *sk = sock->sk;
101784 struct bt_security sec;
101785 int err = 0;
101786- size_t len;
101787+ size_t len = optlen;
101788 u32 opt;
101789
101790 BT_DBG("sk %p", sk);
101791@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101792
101793 sec.level = BT_SECURITY_LOW;
101794
101795- len = min_t(unsigned int, sizeof(sec), optlen);
101796+ len = min(sizeof(sec), len);
101797 if (copy_from_user((char *) &sec, optval, len)) {
101798 err = -EFAULT;
101799 break;
101800diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101801index 8e385a0..a5bdd8e 100644
101802--- a/net/bluetooth/rfcomm/tty.c
101803+++ b/net/bluetooth/rfcomm/tty.c
101804@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101805 BT_DBG("tty %p id %d", tty, tty->index);
101806
101807 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101808- dev->channel, dev->port.count);
101809+ dev->channel, atomic_read(&dev->port.count));
101810
101811 err = tty_port_open(&dev->port, tty, filp);
101812 if (err)
101813@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101814 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101815
101816 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101817- dev->port.count);
101818+ atomic_read(&dev->port.count));
101819
101820 tty_port_close(&dev->port, tty, filp);
101821 }
101822diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101823index 6d69631..b8fdc85 100644
101824--- a/net/bridge/netfilter/ebtables.c
101825+++ b/net/bridge/netfilter/ebtables.c
101826@@ -1518,7 +1518,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101827 tmp.valid_hooks = t->table->valid_hooks;
101828 }
101829 mutex_unlock(&ebt_mutex);
101830- if (copy_to_user(user, &tmp, *len) != 0) {
101831+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101832 BUGPRINT("c2u Didn't work\n");
101833 ret = -EFAULT;
101834 break;
101835@@ -2324,7 +2324,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101836 goto out;
101837 tmp.valid_hooks = t->valid_hooks;
101838
101839- if (copy_to_user(user, &tmp, *len) != 0) {
101840+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101841 ret = -EFAULT;
101842 break;
101843 }
101844@@ -2335,7 +2335,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101845 tmp.entries_size = t->table->entries_size;
101846 tmp.valid_hooks = t->table->valid_hooks;
101847
101848- if (copy_to_user(user, &tmp, *len) != 0) {
101849+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101850 ret = -EFAULT;
101851 break;
101852 }
101853diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101854index f5afda1..dcf770a 100644
101855--- a/net/caif/cfctrl.c
101856+++ b/net/caif/cfctrl.c
101857@@ -10,6 +10,7 @@
101858 #include <linux/spinlock.h>
101859 #include <linux/slab.h>
101860 #include <linux/pkt_sched.h>
101861+#include <linux/sched.h>
101862 #include <net/caif/caif_layer.h>
101863 #include <net/caif/cfpkt.h>
101864 #include <net/caif/cfctrl.h>
101865@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101866 memset(&dev_info, 0, sizeof(dev_info));
101867 dev_info.id = 0xff;
101868 cfsrvl_init(&this->serv, 0, &dev_info, false);
101869- atomic_set(&this->req_seq_no, 1);
101870- atomic_set(&this->rsp_seq_no, 1);
101871+ atomic_set_unchecked(&this->req_seq_no, 1);
101872+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101873 this->serv.layer.receive = cfctrl_recv;
101874 sprintf(this->serv.layer.name, "ctrl");
101875 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101876@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101877 struct cfctrl_request_info *req)
101878 {
101879 spin_lock_bh(&ctrl->info_list_lock);
101880- atomic_inc(&ctrl->req_seq_no);
101881- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101882+ atomic_inc_unchecked(&ctrl->req_seq_no);
101883+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101884 list_add_tail(&req->list, &ctrl->list);
101885 spin_unlock_bh(&ctrl->info_list_lock);
101886 }
101887@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101888 if (p != first)
101889 pr_warn("Requests are not received in order\n");
101890
101891- atomic_set(&ctrl->rsp_seq_no,
101892+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101893 p->sequence_no);
101894 list_del(&p->list);
101895 goto out;
101896diff --git a/net/can/af_can.c b/net/can/af_can.c
101897index ce82337..5d17b4d 100644
101898--- a/net/can/af_can.c
101899+++ b/net/can/af_can.c
101900@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101901 };
101902
101903 /* notifier block for netdevice event */
101904-static struct notifier_block can_netdev_notifier __read_mostly = {
101905+static struct notifier_block can_netdev_notifier = {
101906 .notifier_call = can_notifier,
101907 };
101908
101909diff --git a/net/can/bcm.c b/net/can/bcm.c
101910index dcb75c0..24b1b43 100644
101911--- a/net/can/bcm.c
101912+++ b/net/can/bcm.c
101913@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
101914 }
101915
101916 /* create /proc/net/can-bcm directory */
101917- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101918+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101919 return 0;
101920 }
101921
101922diff --git a/net/can/gw.c b/net/can/gw.c
101923index 050a211..bb9fe33 100644
101924--- a/net/can/gw.c
101925+++ b/net/can/gw.c
101926@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101927 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101928
101929 static HLIST_HEAD(cgw_list);
101930-static struct notifier_block notifier;
101931
101932 static struct kmem_cache *cgw_cache __read_mostly;
101933
101934@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101935 return err;
101936 }
101937
101938+static struct notifier_block notifier = {
101939+ .notifier_call = cgw_notifier
101940+};
101941+
101942 static __init int cgw_module_init(void)
101943 {
101944 /* sanitize given module parameter */
101945@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101946 return -ENOMEM;
101947
101948 /* set notifier */
101949- notifier.notifier_call = cgw_notifier;
101950 register_netdevice_notifier(&notifier);
101951
101952 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101953diff --git a/net/can/proc.c b/net/can/proc.c
101954index 1a19b98..df2b4ec 100644
101955--- a/net/can/proc.c
101956+++ b/net/can/proc.c
101957@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101958 void can_init_proc(void)
101959 {
101960 /* create /proc/net/can directory */
101961- can_dir = proc_mkdir("can", init_net.proc_net);
101962+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101963
101964 if (!can_dir) {
101965 printk(KERN_INFO "can: failed to create /proc/net/can . "
101966diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101967index b2f571d..b584643 100644
101968--- a/net/ceph/messenger.c
101969+++ b/net/ceph/messenger.c
101970@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101971 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101972
101973 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101974-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101975+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101976
101977 static struct page *zero_page; /* used in certain error cases */
101978
101979@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101980 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101981 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101982
101983- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101984+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101985 s = addr_str[i];
101986
101987 switch (ss->ss_family) {
101988diff --git a/net/compat.c b/net/compat.c
101989index bc8aeef..f9c070c 100644
101990--- a/net/compat.c
101991+++ b/net/compat.c
101992@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
101993 return -EFAULT;
101994 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
101995 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
101996- kmsg->msg_name = compat_ptr(tmp1);
101997- kmsg->msg_iov = compat_ptr(tmp2);
101998- kmsg->msg_control = compat_ptr(tmp3);
101999+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
102000+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
102001+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
102002 return 0;
102003 }
102004
102005@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102006
102007 if (kern_msg->msg_name && kern_msg->msg_namelen) {
102008 if (mode == VERIFY_READ) {
102009- int err = move_addr_to_kernel(kern_msg->msg_name,
102010+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
102011 kern_msg->msg_namelen,
102012 kern_address);
102013 if (err < 0)
102014@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102015 }
102016
102017 tot_len = iov_from_user_compat_to_kern(kern_iov,
102018- (struct compat_iovec __user *)kern_msg->msg_iov,
102019+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
102020 kern_msg->msg_iovlen);
102021 if (tot_len >= 0)
102022 kern_msg->msg_iov = kern_iov;
102023@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102024
102025 #define CMSG_COMPAT_FIRSTHDR(msg) \
102026 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
102027- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
102028+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
102029 (struct compat_cmsghdr __user *)NULL)
102030
102031 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
102032 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
102033 (ucmlen) <= (unsigned long) \
102034 ((mhdr)->msg_controllen - \
102035- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
102036+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
102037
102038 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
102039 struct compat_cmsghdr __user *cmsg, int cmsg_len)
102040 {
102041 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
102042- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
102043+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
102044 msg->msg_controllen)
102045 return NULL;
102046 return (struct compat_cmsghdr __user *)ptr;
102047@@ -223,7 +223,7 @@ Efault:
102048
102049 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
102050 {
102051- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102052+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102053 struct compat_cmsghdr cmhdr;
102054 struct compat_timeval ctv;
102055 struct compat_timespec cts[3];
102056@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
102057
102058 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
102059 {
102060- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102061+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102062 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
102063 int fdnum = scm->fp->count;
102064 struct file **fp = scm->fp->fp;
102065@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
102066 return -EFAULT;
102067 old_fs = get_fs();
102068 set_fs(KERNEL_DS);
102069- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
102070+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
102071 set_fs(old_fs);
102072
102073 return err;
102074@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
102075 len = sizeof(ktime);
102076 old_fs = get_fs();
102077 set_fs(KERNEL_DS);
102078- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
102079+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
102080 set_fs(old_fs);
102081
102082 if (!err) {
102083@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102084 case MCAST_JOIN_GROUP:
102085 case MCAST_LEAVE_GROUP:
102086 {
102087- struct compat_group_req __user *gr32 = (void *)optval;
102088+ struct compat_group_req __user *gr32 = (void __user *)optval;
102089 struct group_req __user *kgr =
102090 compat_alloc_user_space(sizeof(struct group_req));
102091 u32 interface;
102092@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102093 case MCAST_BLOCK_SOURCE:
102094 case MCAST_UNBLOCK_SOURCE:
102095 {
102096- struct compat_group_source_req __user *gsr32 = (void *)optval;
102097+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
102098 struct group_source_req __user *kgsr = compat_alloc_user_space(
102099 sizeof(struct group_source_req));
102100 u32 interface;
102101@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102102 }
102103 case MCAST_MSFILTER:
102104 {
102105- struct compat_group_filter __user *gf32 = (void *)optval;
102106+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102107 struct group_filter __user *kgf;
102108 u32 interface, fmode, numsrc;
102109
102110@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
102111 char __user *optval, int __user *optlen,
102112 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
102113 {
102114- struct compat_group_filter __user *gf32 = (void *)optval;
102115+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102116 struct group_filter __user *kgf;
102117 int __user *koptlen;
102118 u32 interface, fmode, numsrc;
102119@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
102120
102121 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
102122 return -EINVAL;
102123- if (copy_from_user(a, args, nas[call]))
102124+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
102125 return -EFAULT;
102126 a0 = a[0];
102127 a1 = a[1];
102128diff --git a/net/core/datagram.c b/net/core/datagram.c
102129index fdbc9a8..cd6972c 100644
102130--- a/net/core/datagram.c
102131+++ b/net/core/datagram.c
102132@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
102133 }
102134
102135 kfree_skb(skb);
102136- atomic_inc(&sk->sk_drops);
102137+ atomic_inc_unchecked(&sk->sk_drops);
102138 sk_mem_reclaim_partial(sk);
102139
102140 return err;
102141diff --git a/net/core/dev.c b/net/core/dev.c
102142index cf8a95f..2837211 100644
102143--- a/net/core/dev.c
102144+++ b/net/core/dev.c
102145@@ -1683,14 +1683,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
102146 {
102147 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
102148 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
102149- atomic_long_inc(&dev->rx_dropped);
102150+ atomic_long_inc_unchecked(&dev->rx_dropped);
102151 kfree_skb(skb);
102152 return NET_RX_DROP;
102153 }
102154 }
102155
102156 if (unlikely(!is_skb_forwardable(dev, skb))) {
102157- atomic_long_inc(&dev->rx_dropped);
102158+ atomic_long_inc_unchecked(&dev->rx_dropped);
102159 kfree_skb(skb);
102160 return NET_RX_DROP;
102161 }
102162@@ -2487,7 +2487,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
102163
102164 struct dev_gso_cb {
102165 void (*destructor)(struct sk_buff *skb);
102166-};
102167+} __no_const;
102168
102169 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
102170
102171@@ -2952,7 +2952,7 @@ recursion_alert:
102172 rc = -ENETDOWN;
102173 rcu_read_unlock_bh();
102174
102175- atomic_long_inc(&dev->tx_dropped);
102176+ atomic_long_inc_unchecked(&dev->tx_dropped);
102177 kfree_skb(skb);
102178 return rc;
102179 out:
102180@@ -3296,7 +3296,7 @@ enqueue:
102181
102182 local_irq_restore(flags);
102183
102184- atomic_long_inc(&skb->dev->rx_dropped);
102185+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102186 kfree_skb(skb);
102187 return NET_RX_DROP;
102188 }
102189@@ -3373,7 +3373,7 @@ int netif_rx_ni(struct sk_buff *skb)
102190 }
102191 EXPORT_SYMBOL(netif_rx_ni);
102192
102193-static void net_tx_action(struct softirq_action *h)
102194+static __latent_entropy void net_tx_action(void)
102195 {
102196 struct softnet_data *sd = &__get_cpu_var(softnet_data);
102197
102198@@ -3706,7 +3706,7 @@ ncls:
102199 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
102200 } else {
102201 drop:
102202- atomic_long_inc(&skb->dev->rx_dropped);
102203+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102204 kfree_skb(skb);
102205 /* Jamal, now you will not able to escape explaining
102206 * me how you were going to use this. :-)
102207@@ -4426,7 +4426,7 @@ void netif_napi_del(struct napi_struct *napi)
102208 }
102209 EXPORT_SYMBOL(netif_napi_del);
102210
102211-static void net_rx_action(struct softirq_action *h)
102212+static __latent_entropy void net_rx_action(void)
102213 {
102214 struct softnet_data *sd = &__get_cpu_var(softnet_data);
102215 unsigned long time_limit = jiffies + 2;
102216@@ -6480,8 +6480,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
102217 } else {
102218 netdev_stats_to_stats64(storage, &dev->stats);
102219 }
102220- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
102221- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
102222+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
102223+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
102224 return storage;
102225 }
102226 EXPORT_SYMBOL(dev_get_stats);
102227diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
102228index cf999e0..c59a975 100644
102229--- a/net/core/dev_ioctl.c
102230+++ b/net/core/dev_ioctl.c
102231@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
102232 if (no_module && capable(CAP_NET_ADMIN))
102233 no_module = request_module("netdev-%s", name);
102234 if (no_module && capable(CAP_SYS_MODULE)) {
102235+#ifdef CONFIG_GRKERNSEC_MODHARDEN
102236+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
102237+#else
102238 if (!request_module("%s", name))
102239 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
102240 name);
102241+#endif
102242 }
102243 }
102244 EXPORT_SYMBOL(dev_load);
102245diff --git a/net/core/filter.c b/net/core/filter.c
102246index d814b8a..b5ab778 100644
102247--- a/net/core/filter.c
102248+++ b/net/core/filter.c
102249@@ -559,7 +559,11 @@ do_pass:
102250
102251 /* Unkown instruction. */
102252 default:
102253- goto err;
102254+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
102255+ fp->code, fp->jt, fp->jf, fp->k);
102256+ kfree(addrs);
102257+ BUG();
102258+ return -EINVAL;
102259 }
102260
102261 insn++;
102262@@ -606,7 +610,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
102263 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
102264 int pc, ret = 0;
102265
102266- BUILD_BUG_ON(BPF_MEMWORDS > 16);
102267+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
102268
102269 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
102270 if (!masks)
102271@@ -933,7 +937,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
102272
102273 /* Expand fp for appending the new filter representation. */
102274 old_fp = fp;
102275- fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
102276+ fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
102277 if (!fp) {
102278 /* The old_fp is still around in case we couldn't
102279 * allocate new memory, so uncharge on that one.
102280@@ -1013,11 +1017,11 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
102281 if (fprog->filter == NULL)
102282 return -EINVAL;
102283
102284- fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
102285+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
102286 if (!fp)
102287 return -ENOMEM;
102288
102289- memcpy(fp->insns, fprog->filter, fsize);
102290+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
102291
102292 fp->len = fprog->len;
102293 /* Since unattached filters are not copied back to user
102294@@ -1069,12 +1073,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
102295 if (fprog->filter == NULL)
102296 return -EINVAL;
102297
102298- prog = kmalloc(bpf_fsize, GFP_KERNEL);
102299+ prog = bpf_prog_alloc(bpf_fsize, 0);
102300 if (!prog)
102301 return -ENOMEM;
102302
102303 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
102304- kfree(prog);
102305+ __bpf_prog_free(prog);
102306 return -EFAULT;
102307 }
102308
102309@@ -1082,7 +1086,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
102310
102311 err = bpf_prog_store_orig_filter(prog, fprog);
102312 if (err) {
102313- kfree(prog);
102314+ __bpf_prog_free(prog);
102315 return -ENOMEM;
102316 }
102317
102318diff --git a/net/core/flow.c b/net/core/flow.c
102319index a0348fd..6951c76 100644
102320--- a/net/core/flow.c
102321+++ b/net/core/flow.c
102322@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
102323 static int flow_entry_valid(struct flow_cache_entry *fle,
102324 struct netns_xfrm *xfrm)
102325 {
102326- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
102327+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
102328 return 0;
102329 if (fle->object && !fle->object->ops->check(fle->object))
102330 return 0;
102331@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
102332 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
102333 fcp->hash_count++;
102334 }
102335- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
102336+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
102337 flo = fle->object;
102338 if (!flo)
102339 goto ret_object;
102340@@ -263,7 +263,7 @@ nocache:
102341 }
102342 flo = resolver(net, key, family, dir, flo, ctx);
102343 if (fle) {
102344- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
102345+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
102346 if (!IS_ERR(flo))
102347 fle->object = flo;
102348 else
102349diff --git a/net/core/iovec.c b/net/core/iovec.c
102350index e1ec45a..e5c6f16 100644
102351--- a/net/core/iovec.c
102352+++ b/net/core/iovec.c
102353@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
102354 if (m->msg_name && m->msg_namelen) {
102355 if (mode == VERIFY_READ) {
102356 void __user *namep;
102357- namep = (void __user __force *) m->msg_name;
102358+ namep = (void __force_user *) m->msg_name;
102359 err = move_addr_to_kernel(namep, m->msg_namelen,
102360 address);
102361 if (err < 0)
102362@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
102363 }
102364
102365 size = m->msg_iovlen * sizeof(struct iovec);
102366- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
102367+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
102368 return -EFAULT;
102369
102370 m->msg_iov = iov;
102371diff --git a/net/core/neighbour.c b/net/core/neighbour.c
102372index ef31fef..8be66d9 100644
102373--- a/net/core/neighbour.c
102374+++ b/net/core/neighbour.c
102375@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
102376 void __user *buffer, size_t *lenp, loff_t *ppos)
102377 {
102378 int size, ret;
102379- struct ctl_table tmp = *ctl;
102380+ ctl_table_no_const tmp = *ctl;
102381
102382 tmp.extra1 = &zero;
102383 tmp.extra2 = &unres_qlen_max;
102384@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
102385 void __user *buffer,
102386 size_t *lenp, loff_t *ppos)
102387 {
102388- struct ctl_table tmp = *ctl;
102389+ ctl_table_no_const tmp = *ctl;
102390 int ret;
102391
102392 tmp.extra1 = &zero;
102393diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
102394index 2bf8329..2eb1423 100644
102395--- a/net/core/net-procfs.c
102396+++ b/net/core/net-procfs.c
102397@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
102398 struct rtnl_link_stats64 temp;
102399 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
102400
102401- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102402+ if (gr_proc_is_restricted())
102403+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102404+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102405+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
102406+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
102407+ else
102408+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102409 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102410 dev->name, stats->rx_bytes, stats->rx_packets,
102411 stats->rx_errors,
102412@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
102413 return 0;
102414 }
102415
102416-static const struct seq_operations dev_seq_ops = {
102417+const struct seq_operations dev_seq_ops = {
102418 .start = dev_seq_start,
102419 .next = dev_seq_next,
102420 .stop = dev_seq_stop,
102421@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
102422
102423 static int softnet_seq_open(struct inode *inode, struct file *file)
102424 {
102425- return seq_open(file, &softnet_seq_ops);
102426+ return seq_open_restrict(file, &softnet_seq_ops);
102427 }
102428
102429 static const struct file_operations softnet_seq_fops = {
102430@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
102431 else
102432 seq_printf(seq, "%04x", ntohs(pt->type));
102433
102434+#ifdef CONFIG_GRKERNSEC_HIDESYM
102435+ seq_printf(seq, " %-8s %pf\n",
102436+ pt->dev ? pt->dev->name : "", NULL);
102437+#else
102438 seq_printf(seq, " %-8s %pf\n",
102439 pt->dev ? pt->dev->name : "", pt->func);
102440+#endif
102441 }
102442
102443 return 0;
102444diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
102445index 9dd0669..c52fb1b 100644
102446--- a/net/core/net-sysfs.c
102447+++ b/net/core/net-sysfs.c
102448@@ -278,7 +278,7 @@ static ssize_t carrier_changes_show(struct device *dev,
102449 {
102450 struct net_device *netdev = to_net_dev(dev);
102451 return sprintf(buf, fmt_dec,
102452- atomic_read(&netdev->carrier_changes));
102453+ atomic_read_unchecked(&netdev->carrier_changes));
102454 }
102455 static DEVICE_ATTR_RO(carrier_changes);
102456
102457diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
102458index 7c6b51a..e9dd57f 100644
102459--- a/net/core/net_namespace.c
102460+++ b/net/core/net_namespace.c
102461@@ -445,7 +445,7 @@ static int __register_pernet_operations(struct list_head *list,
102462 int error;
102463 LIST_HEAD(net_exit_list);
102464
102465- list_add_tail(&ops->list, list);
102466+ pax_list_add_tail((struct list_head *)&ops->list, list);
102467 if (ops->init || (ops->id && ops->size)) {
102468 for_each_net(net) {
102469 error = ops_init(ops, net);
102470@@ -458,7 +458,7 @@ static int __register_pernet_operations(struct list_head *list,
102471
102472 out_undo:
102473 /* If I have an error cleanup all namespaces I initialized */
102474- list_del(&ops->list);
102475+ pax_list_del((struct list_head *)&ops->list);
102476 ops_exit_list(ops, &net_exit_list);
102477 ops_free_list(ops, &net_exit_list);
102478 return error;
102479@@ -469,7 +469,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
102480 struct net *net;
102481 LIST_HEAD(net_exit_list);
102482
102483- list_del(&ops->list);
102484+ pax_list_del((struct list_head *)&ops->list);
102485 for_each_net(net)
102486 list_add_tail(&net->exit_list, &net_exit_list);
102487 ops_exit_list(ops, &net_exit_list);
102488@@ -603,7 +603,7 @@ int register_pernet_device(struct pernet_operations *ops)
102489 mutex_lock(&net_mutex);
102490 error = register_pernet_operations(&pernet_list, ops);
102491 if (!error && (first_device == &pernet_list))
102492- first_device = &ops->list;
102493+ first_device = (struct list_head *)&ops->list;
102494 mutex_unlock(&net_mutex);
102495 return error;
102496 }
102497diff --git a/net/core/netpoll.c b/net/core/netpoll.c
102498index 907fb5e..8260f040b 100644
102499--- a/net/core/netpoll.c
102500+++ b/net/core/netpoll.c
102501@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102502 struct udphdr *udph;
102503 struct iphdr *iph;
102504 struct ethhdr *eth;
102505- static atomic_t ip_ident;
102506+ static atomic_unchecked_t ip_ident;
102507 struct ipv6hdr *ip6h;
102508
102509 udp_len = len + sizeof(*udph);
102510@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102511 put_unaligned(0x45, (unsigned char *)iph);
102512 iph->tos = 0;
102513 put_unaligned(htons(ip_len), &(iph->tot_len));
102514- iph->id = htons(atomic_inc_return(&ip_ident));
102515+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
102516 iph->frag_off = 0;
102517 iph->ttl = 64;
102518 iph->protocol = IPPROTO_UDP;
102519diff --git a/net/core/pktgen.c b/net/core/pktgen.c
102520index 8b849dd..cd88bfc 100644
102521--- a/net/core/pktgen.c
102522+++ b/net/core/pktgen.c
102523@@ -3723,7 +3723,7 @@ static int __net_init pg_net_init(struct net *net)
102524 pn->net = net;
102525 INIT_LIST_HEAD(&pn->pktgen_threads);
102526 pn->pktgen_exiting = false;
102527- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
102528+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
102529 if (!pn->proc_dir) {
102530 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
102531 return -ENODEV;
102532diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
102533index f0493e3..0f43f7a 100644
102534--- a/net/core/rtnetlink.c
102535+++ b/net/core/rtnetlink.c
102536@@ -58,7 +58,7 @@ struct rtnl_link {
102537 rtnl_doit_func doit;
102538 rtnl_dumpit_func dumpit;
102539 rtnl_calcit_func calcit;
102540-};
102541+} __no_const;
102542
102543 static DEFINE_MUTEX(rtnl_mutex);
102544
102545@@ -304,10 +304,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
102546 * to use the ops for creating device. So do not
102547 * fill up dellink as well. That disables rtnl_dellink.
102548 */
102549- if (ops->setup && !ops->dellink)
102550- ops->dellink = unregister_netdevice_queue;
102551+ if (ops->setup && !ops->dellink) {
102552+ pax_open_kernel();
102553+ *(void **)&ops->dellink = unregister_netdevice_queue;
102554+ pax_close_kernel();
102555+ }
102556
102557- list_add_tail(&ops->list, &link_ops);
102558+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
102559 return 0;
102560 }
102561 EXPORT_SYMBOL_GPL(__rtnl_link_register);
102562@@ -354,7 +357,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
102563 for_each_net(net) {
102564 __rtnl_kill_links(net, ops);
102565 }
102566- list_del(&ops->list);
102567+ pax_list_del((struct list_head *)&ops->list);
102568 }
102569 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
102570
102571@@ -1014,7 +1017,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
102572 (dev->ifalias &&
102573 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
102574 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
102575- atomic_read(&dev->carrier_changes)))
102576+ atomic_read_unchecked(&dev->carrier_changes)))
102577 goto nla_put_failure;
102578
102579 if (1) {
102580diff --git a/net/core/scm.c b/net/core/scm.c
102581index b442e7e..6f5b5a2 100644
102582--- a/net/core/scm.c
102583+++ b/net/core/scm.c
102584@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
102585 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102586 {
102587 struct cmsghdr __user *cm
102588- = (__force struct cmsghdr __user *)msg->msg_control;
102589+ = (struct cmsghdr __force_user *)msg->msg_control;
102590 struct cmsghdr cmhdr;
102591 int cmlen = CMSG_LEN(len);
102592 int err;
102593@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102594 err = -EFAULT;
102595 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
102596 goto out;
102597- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
102598+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
102599 goto out;
102600 cmlen = CMSG_SPACE(len);
102601 if (msg->msg_controllen < cmlen)
102602@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
102603 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102604 {
102605 struct cmsghdr __user *cm
102606- = (__force struct cmsghdr __user*)msg->msg_control;
102607+ = (struct cmsghdr __force_user *)msg->msg_control;
102608
102609 int fdmax = 0;
102610 int fdnum = scm->fp->count;
102611@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102612 if (fdnum < fdmax)
102613 fdmax = fdnum;
102614
102615- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
102616+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
102617 i++, cmfptr++)
102618 {
102619 struct socket *sock;
102620diff --git a/net/core/skbuff.c b/net/core/skbuff.c
102621index 8d28969..4d36260 100644
102622--- a/net/core/skbuff.c
102623+++ b/net/core/skbuff.c
102624@@ -360,18 +360,29 @@ refill:
102625 goto end;
102626 }
102627 nc->frag.size = PAGE_SIZE << order;
102628-recycle:
102629- atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
102630+ /* Even if we own the page, we do not use atomic_set().
102631+ * This would break get_page_unless_zero() users.
102632+ */
102633+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
102634+ &nc->frag.page->_count);
102635 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
102636 nc->frag.offset = 0;
102637 }
102638
102639 if (nc->frag.offset + fragsz > nc->frag.size) {
102640- /* avoid unnecessary locked operations if possible */
102641- if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
102642- atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
102643- goto recycle;
102644- goto refill;
102645+ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
102646+ if (!atomic_sub_and_test(nc->pagecnt_bias,
102647+ &nc->frag.page->_count))
102648+ goto refill;
102649+ /* OK, page count is 0, we can safely set it */
102650+ atomic_set(&nc->frag.page->_count,
102651+ NETDEV_PAGECNT_MAX_BIAS);
102652+ } else {
102653+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
102654+ &nc->frag.page->_count);
102655+ }
102656+ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
102657+ nc->frag.offset = 0;
102658 }
102659
102660 data = page_address(nc->frag.page) + nc->frag.offset;
102661@@ -2011,7 +2022,7 @@ EXPORT_SYMBOL(__skb_checksum);
102662 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102663 int len, __wsum csum)
102664 {
102665- const struct skb_checksum_ops ops = {
102666+ static const struct skb_checksum_ops ops = {
102667 .update = csum_partial_ext,
102668 .combine = csum_block_add_ext,
102669 };
102670@@ -3237,13 +3248,15 @@ void __init skb_init(void)
102671 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102672 sizeof(struct sk_buff),
102673 0,
102674- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102675+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102676+ SLAB_NO_SANITIZE,
102677 NULL);
102678 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102679 (2*sizeof(struct sk_buff)) +
102680 sizeof(atomic_t),
102681 0,
102682- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102683+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102684+ SLAB_NO_SANITIZE,
102685 NULL);
102686 }
102687
102688diff --git a/net/core/sock.c b/net/core/sock.c
102689index 9c3f823..bd8c884 100644
102690--- a/net/core/sock.c
102691+++ b/net/core/sock.c
102692@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102693 struct sk_buff_head *list = &sk->sk_receive_queue;
102694
102695 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102696- atomic_inc(&sk->sk_drops);
102697+ atomic_inc_unchecked(&sk->sk_drops);
102698 trace_sock_rcvqueue_full(sk, skb);
102699 return -ENOMEM;
102700 }
102701@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102702 return err;
102703
102704 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102705- atomic_inc(&sk->sk_drops);
102706+ atomic_inc_unchecked(&sk->sk_drops);
102707 return -ENOBUFS;
102708 }
102709
102710@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102711 skb_dst_force(skb);
102712
102713 spin_lock_irqsave(&list->lock, flags);
102714- skb->dropcount = atomic_read(&sk->sk_drops);
102715+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102716 __skb_queue_tail(list, skb);
102717 spin_unlock_irqrestore(&list->lock, flags);
102718
102719@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102720 skb->dev = NULL;
102721
102722 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102723- atomic_inc(&sk->sk_drops);
102724+ atomic_inc_unchecked(&sk->sk_drops);
102725 goto discard_and_relse;
102726 }
102727 if (nested)
102728@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102729 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102730 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102731 bh_unlock_sock(sk);
102732- atomic_inc(&sk->sk_drops);
102733+ atomic_inc_unchecked(&sk->sk_drops);
102734 goto discard_and_relse;
102735 }
102736
102737@@ -999,12 +999,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102738 struct timeval tm;
102739 } v;
102740
102741- int lv = sizeof(int);
102742- int len;
102743+ unsigned int lv = sizeof(int);
102744+ unsigned int len;
102745
102746 if (get_user(len, optlen))
102747 return -EFAULT;
102748- if (len < 0)
102749+ if (len > INT_MAX)
102750 return -EINVAL;
102751
102752 memset(&v, 0, sizeof(v));
102753@@ -1142,11 +1142,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102754
102755 case SO_PEERNAME:
102756 {
102757- char address[128];
102758+ char address[_K_SS_MAXSIZE];
102759
102760 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102761 return -ENOTCONN;
102762- if (lv < len)
102763+ if (lv < len || sizeof address < len)
102764 return -EINVAL;
102765 if (copy_to_user(optval, address, len))
102766 return -EFAULT;
102767@@ -1227,7 +1227,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102768
102769 if (len > lv)
102770 len = lv;
102771- if (copy_to_user(optval, &v, len))
102772+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102773 return -EFAULT;
102774 lenout:
102775 if (put_user(len, optlen))
102776@@ -1723,6 +1723,8 @@ EXPORT_SYMBOL(sock_kmalloc);
102777 */
102778 void sock_kfree_s(struct sock *sk, void *mem, int size)
102779 {
102780+ if (WARN_ON_ONCE(!mem))
102781+ return;
102782 kfree(mem);
102783 atomic_sub(size, &sk->sk_omem_alloc);
102784 }
102785@@ -2369,7 +2371,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102786 */
102787 smp_wmb();
102788 atomic_set(&sk->sk_refcnt, 1);
102789- atomic_set(&sk->sk_drops, 0);
102790+ atomic_set_unchecked(&sk->sk_drops, 0);
102791 }
102792 EXPORT_SYMBOL(sock_init_data);
102793
102794@@ -2497,6 +2499,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102795 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102796 int level, int type)
102797 {
102798+ struct sock_extended_err ee;
102799 struct sock_exterr_skb *serr;
102800 struct sk_buff *skb, *skb2;
102801 int copied, err;
102802@@ -2518,7 +2521,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102803 sock_recv_timestamp(msg, sk, skb);
102804
102805 serr = SKB_EXT_ERR(skb);
102806- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102807+ ee = serr->ee;
102808+ put_cmsg(msg, level, type, sizeof ee, &ee);
102809
102810 msg->msg_flags |= MSG_ERRQUEUE;
102811 err = copied;
102812diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102813index ad704c7..ca48aff 100644
102814--- a/net/core/sock_diag.c
102815+++ b/net/core/sock_diag.c
102816@@ -9,26 +9,33 @@
102817 #include <linux/inet_diag.h>
102818 #include <linux/sock_diag.h>
102819
102820-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102821+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102822 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102823 static DEFINE_MUTEX(sock_diag_table_mutex);
102824
102825 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102826 {
102827+#ifndef CONFIG_GRKERNSEC_HIDESYM
102828 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102829 cookie[1] != INET_DIAG_NOCOOKIE) &&
102830 ((u32)(unsigned long)sk != cookie[0] ||
102831 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102832 return -ESTALE;
102833 else
102834+#endif
102835 return 0;
102836 }
102837 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102838
102839 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102840 {
102841+#ifdef CONFIG_GRKERNSEC_HIDESYM
102842+ cookie[0] = 0;
102843+ cookie[1] = 0;
102844+#else
102845 cookie[0] = (u32)(unsigned long)sk;
102846 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102847+#endif
102848 }
102849 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102850
102851@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102852 mutex_lock(&sock_diag_table_mutex);
102853 if (sock_diag_handlers[hndl->family])
102854 err = -EBUSY;
102855- else
102856+ else {
102857+ pax_open_kernel();
102858 sock_diag_handlers[hndl->family] = hndl;
102859+ pax_close_kernel();
102860+ }
102861 mutex_unlock(&sock_diag_table_mutex);
102862
102863 return err;
102864@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102865
102866 mutex_lock(&sock_diag_table_mutex);
102867 BUG_ON(sock_diag_handlers[family] != hnld);
102868+ pax_open_kernel();
102869 sock_diag_handlers[family] = NULL;
102870+ pax_close_kernel();
102871 mutex_unlock(&sock_diag_table_mutex);
102872 }
102873 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102874diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102875index cf9cd13..50683950 100644
102876--- a/net/core/sysctl_net_core.c
102877+++ b/net/core/sysctl_net_core.c
102878@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102879 {
102880 unsigned int orig_size, size;
102881 int ret, i;
102882- struct ctl_table tmp = {
102883+ ctl_table_no_const tmp = {
102884 .data = &size,
102885 .maxlen = sizeof(size),
102886 .mode = table->mode
102887@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102888 void __user *buffer, size_t *lenp, loff_t *ppos)
102889 {
102890 char id[IFNAMSIZ];
102891- struct ctl_table tbl = {
102892+ ctl_table_no_const tbl = {
102893 .data = id,
102894 .maxlen = IFNAMSIZ,
102895 };
102896@@ -263,7 +263,7 @@ static struct ctl_table net_core_table[] = {
102897 .mode = 0644,
102898 .proc_handler = proc_dointvec
102899 },
102900-#ifdef CONFIG_BPF_JIT
102901+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102902 {
102903 .procname = "bpf_jit_enable",
102904 .data = &bpf_jit_enable,
102905@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
102906
102907 static __net_init int sysctl_core_net_init(struct net *net)
102908 {
102909- struct ctl_table *tbl;
102910+ ctl_table_no_const *tbl = NULL;
102911
102912 net->core.sysctl_somaxconn = SOMAXCONN;
102913
102914- tbl = netns_core_table;
102915 if (!net_eq(net, &init_net)) {
102916- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102917+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102918 if (tbl == NULL)
102919 goto err_dup;
102920
102921@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102922 if (net->user_ns != &init_user_ns) {
102923 tbl[0].procname = NULL;
102924 }
102925- }
102926-
102927- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102928+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102929+ } else
102930+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102931 if (net->core.sysctl_hdr == NULL)
102932 goto err_reg;
102933
102934 return 0;
102935
102936 err_reg:
102937- if (tbl != netns_core_table)
102938- kfree(tbl);
102939+ kfree(tbl);
102940 err_dup:
102941 return -ENOMEM;
102942 }
102943@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102944 kfree(tbl);
102945 }
102946
102947-static __net_initdata struct pernet_operations sysctl_core_ops = {
102948+static __net_initconst struct pernet_operations sysctl_core_ops = {
102949 .init = sysctl_core_net_init,
102950 .exit = sysctl_core_net_exit,
102951 };
102952diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102953index ae011b4..d2d18bf 100644
102954--- a/net/decnet/af_decnet.c
102955+++ b/net/decnet/af_decnet.c
102956@@ -465,6 +465,7 @@ static struct proto dn_proto = {
102957 .sysctl_rmem = sysctl_decnet_rmem,
102958 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102959 .obj_size = sizeof(struct dn_sock),
102960+ .slab_flags = SLAB_USERCOPY,
102961 };
102962
102963 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102964diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102965index 3b726f3..1af6368 100644
102966--- a/net/decnet/dn_dev.c
102967+++ b/net/decnet/dn_dev.c
102968@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
102969 .extra1 = &min_t3,
102970 .extra2 = &max_t3
102971 },
102972- {0}
102973+ { }
102974 },
102975 };
102976
102977diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102978index 5325b54..a0d4d69 100644
102979--- a/net/decnet/sysctl_net_decnet.c
102980+++ b/net/decnet/sysctl_net_decnet.c
102981@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102982
102983 if (len > *lenp) len = *lenp;
102984
102985- if (copy_to_user(buffer, addr, len))
102986+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102987 return -EFAULT;
102988
102989 *lenp = len;
102990@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102991
102992 if (len > *lenp) len = *lenp;
102993
102994- if (copy_to_user(buffer, devname, len))
102995+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102996 return -EFAULT;
102997
102998 *lenp = len;
102999diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
103000index 32755cb..236d827 100644
103001--- a/net/ieee802154/reassembly.c
103002+++ b/net/ieee802154/reassembly.c
103003@@ -433,14 +433,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
103004
103005 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103006 {
103007- struct ctl_table *table;
103008+ ctl_table_no_const *table = NULL;
103009 struct ctl_table_header *hdr;
103010 struct netns_ieee802154_lowpan *ieee802154_lowpan =
103011 net_ieee802154_lowpan(net);
103012
103013- table = lowpan_frags_ns_ctl_table;
103014 if (!net_eq(net, &init_net)) {
103015- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
103016+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
103017 GFP_KERNEL);
103018 if (table == NULL)
103019 goto err_alloc;
103020@@ -455,9 +454,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103021 /* Don't export sysctls to unprivileged users */
103022 if (net->user_ns != &init_user_ns)
103023 table[0].procname = NULL;
103024- }
103025-
103026- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
103027+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
103028+ } else
103029+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
103030 if (hdr == NULL)
103031 goto err_reg;
103032
103033@@ -465,8 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103034 return 0;
103035
103036 err_reg:
103037- if (!net_eq(net, &init_net))
103038- kfree(table);
103039+ kfree(table);
103040 err_alloc:
103041 return -ENOMEM;
103042 }
103043diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
103044index 214882e..f958b50 100644
103045--- a/net/ipv4/devinet.c
103046+++ b/net/ipv4/devinet.c
103047@@ -1548,7 +1548,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
103048 idx = 0;
103049 head = &net->dev_index_head[h];
103050 rcu_read_lock();
103051- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103052+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103053 net->dev_base_seq;
103054 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103055 if (idx < s_idx)
103056@@ -1866,7 +1866,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
103057 idx = 0;
103058 head = &net->dev_index_head[h];
103059 rcu_read_lock();
103060- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103061+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103062 net->dev_base_seq;
103063 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103064 if (idx < s_idx)
103065@@ -2101,7 +2101,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
103066 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
103067 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
103068
103069-static struct devinet_sysctl_table {
103070+static const struct devinet_sysctl_table {
103071 struct ctl_table_header *sysctl_header;
103072 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
103073 } devinet_sysctl = {
103074@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
103075 int err;
103076 struct ipv4_devconf *all, *dflt;
103077 #ifdef CONFIG_SYSCTL
103078- struct ctl_table *tbl = ctl_forward_entry;
103079+ ctl_table_no_const *tbl = NULL;
103080 struct ctl_table_header *forw_hdr;
103081 #endif
103082
103083@@ -2251,7 +2251,7 @@ static __net_init int devinet_init_net(struct net *net)
103084 goto err_alloc_dflt;
103085
103086 #ifdef CONFIG_SYSCTL
103087- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
103088+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
103089 if (tbl == NULL)
103090 goto err_alloc_ctl;
103091
103092@@ -2271,7 +2271,10 @@ static __net_init int devinet_init_net(struct net *net)
103093 goto err_reg_dflt;
103094
103095 err = -ENOMEM;
103096- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103097+ if (!net_eq(net, &init_net))
103098+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103099+ else
103100+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
103101 if (forw_hdr == NULL)
103102 goto err_reg_ctl;
103103 net->ipv4.forw_hdr = forw_hdr;
103104@@ -2287,8 +2290,7 @@ err_reg_ctl:
103105 err_reg_dflt:
103106 __devinet_sysctl_unregister(all);
103107 err_reg_all:
103108- if (tbl != ctl_forward_entry)
103109- kfree(tbl);
103110+ kfree(tbl);
103111 err_alloc_ctl:
103112 #endif
103113 if (dflt != &ipv4_devconf_dflt)
103114diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
103115index 255aa99..45c78f8 100644
103116--- a/net/ipv4/fib_frontend.c
103117+++ b/net/ipv4/fib_frontend.c
103118@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
103119 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103120 fib_sync_up(dev);
103121 #endif
103122- atomic_inc(&net->ipv4.dev_addr_genid);
103123+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103124 rt_cache_flush(dev_net(dev));
103125 break;
103126 case NETDEV_DOWN:
103127 fib_del_ifaddr(ifa, NULL);
103128- atomic_inc(&net->ipv4.dev_addr_genid);
103129+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103130 if (ifa->ifa_dev->ifa_list == NULL) {
103131 /* Last address was deleted from this interface.
103132 * Disable IP.
103133@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
103134 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103135 fib_sync_up(dev);
103136 #endif
103137- atomic_inc(&net->ipv4.dev_addr_genid);
103138+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103139 rt_cache_flush(net);
103140 break;
103141 case NETDEV_DOWN:
103142diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
103143index b10cd43a..22327f9 100644
103144--- a/net/ipv4/fib_semantics.c
103145+++ b/net/ipv4/fib_semantics.c
103146@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
103147 nh->nh_saddr = inet_select_addr(nh->nh_dev,
103148 nh->nh_gw,
103149 nh->nh_parent->fib_scope);
103150- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
103151+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
103152
103153 return nh->nh_saddr;
103154 }
103155diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
103156index 6556263..db77807 100644
103157--- a/net/ipv4/gre_offload.c
103158+++ b/net/ipv4/gre_offload.c
103159@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
103160 if (csum)
103161 skb->encap_hdr_csum = 1;
103162
103163- if (unlikely(!pskb_may_pull(skb, ghl)))
103164- goto out;
103165-
103166 /* setup inner skb. */
103167 skb->protocol = greh->protocol;
103168 skb->encapsulation = 0;
103169
103170+ if (unlikely(!pskb_may_pull(skb, ghl)))
103171+ goto out;
103172+
103173 __skb_pull(skb, ghl);
103174 skb_reset_mac_header(skb);
103175 skb_set_network_header(skb, skb_inner_network_offset(skb));
103176diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
103177index 43116e8..ba0916a8 100644
103178--- a/net/ipv4/inet_hashtables.c
103179+++ b/net/ipv4/inet_hashtables.c
103180@@ -18,6 +18,7 @@
103181 #include <linux/sched.h>
103182 #include <linux/slab.h>
103183 #include <linux/wait.h>
103184+#include <linux/security.h>
103185
103186 #include <net/inet_connection_sock.h>
103187 #include <net/inet_hashtables.h>
103188@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
103189 return inet_ehashfn(net, laddr, lport, faddr, fport);
103190 }
103191
103192+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
103193+
103194 /*
103195 * Allocate and initialize a new local port bind bucket.
103196 * The bindhash mutex for snum's hash chain must be held here.
103197@@ -554,6 +557,8 @@ ok:
103198 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
103199 spin_unlock(&head->lock);
103200
103201+ gr_update_task_in_ip_table(inet_sk(sk));
103202+
103203 if (tw) {
103204 inet_twsk_deschedule(tw, death_row);
103205 while (twrefcnt) {
103206diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
103207index bd5f592..e80e605 100644
103208--- a/net/ipv4/inetpeer.c
103209+++ b/net/ipv4/inetpeer.c
103210@@ -482,7 +482,7 @@ relookup:
103211 if (p) {
103212 p->daddr = *daddr;
103213 atomic_set(&p->refcnt, 1);
103214- atomic_set(&p->rid, 0);
103215+ atomic_set_unchecked(&p->rid, 0);
103216 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
103217 p->rate_tokens = 0;
103218 /* 60*HZ is arbitrary, but chosen enough high so that the first
103219diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
103220index 15f0e2b..8cf8177 100644
103221--- a/net/ipv4/ip_fragment.c
103222+++ b/net/ipv4/ip_fragment.c
103223@@ -268,7 +268,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
103224 return 0;
103225
103226 start = qp->rid;
103227- end = atomic_inc_return(&peer->rid);
103228+ end = atomic_inc_return_unchecked(&peer->rid);
103229 qp->rid = end;
103230
103231 rc = qp->q.fragments && (end - start) > max;
103232@@ -746,12 +746,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
103233
103234 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103235 {
103236- struct ctl_table *table;
103237+ ctl_table_no_const *table = NULL;
103238 struct ctl_table_header *hdr;
103239
103240- table = ip4_frags_ns_ctl_table;
103241 if (!net_eq(net, &init_net)) {
103242- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103243+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103244 if (table == NULL)
103245 goto err_alloc;
103246
103247@@ -765,9 +764,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103248 /* Don't export sysctls to unprivileged users */
103249 if (net->user_ns != &init_user_ns)
103250 table[0].procname = NULL;
103251- }
103252+ hdr = register_net_sysctl(net, "net/ipv4", table);
103253+ } else
103254+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
103255
103256- hdr = register_net_sysctl(net, "net/ipv4", table);
103257 if (hdr == NULL)
103258 goto err_reg;
103259
103260@@ -775,8 +775,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103261 return 0;
103262
103263 err_reg:
103264- if (!net_eq(net, &init_net))
103265- kfree(table);
103266+ kfree(table);
103267 err_alloc:
103268 return -ENOMEM;
103269 }
103270diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
103271index 9b84254..c776611 100644
103272--- a/net/ipv4/ip_gre.c
103273+++ b/net/ipv4/ip_gre.c
103274@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
103275 module_param(log_ecn_error, bool, 0644);
103276 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103277
103278-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
103279+static struct rtnl_link_ops ipgre_link_ops;
103280 static int ipgre_tunnel_init(struct net_device *dev);
103281
103282 static int ipgre_net_id __read_mostly;
103283@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
103284 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
103285 };
103286
103287-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103288+static struct rtnl_link_ops ipgre_link_ops = {
103289 .kind = "gre",
103290 .maxtype = IFLA_GRE_MAX,
103291 .policy = ipgre_policy,
103292@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103293 .fill_info = ipgre_fill_info,
103294 };
103295
103296-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
103297+static struct rtnl_link_ops ipgre_tap_ops = {
103298 .kind = "gretap",
103299 .maxtype = IFLA_GRE_MAX,
103300 .policy = ipgre_policy,
103301diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
103302index 3d4da2c..40f9c29 100644
103303--- a/net/ipv4/ip_input.c
103304+++ b/net/ipv4/ip_input.c
103305@@ -147,6 +147,10 @@
103306 #include <linux/mroute.h>
103307 #include <linux/netlink.h>
103308
103309+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103310+extern int grsec_enable_blackhole;
103311+#endif
103312+
103313 /*
103314 * Process Router Attention IP option (RFC 2113)
103315 */
103316@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
103317 if (!raw) {
103318 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
103319 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
103320+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103321+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103322+#endif
103323 icmp_send(skb, ICMP_DEST_UNREACH,
103324 ICMP_PROT_UNREACH, 0);
103325 }
103326diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
103327index 215af2b..73cbbe1 100644
103328--- a/net/ipv4/ip_output.c
103329+++ b/net/ipv4/ip_output.c
103330@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
103331 */
103332 features = netif_skb_features(skb);
103333 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
103334- if (IS_ERR(segs)) {
103335+ if (IS_ERR_OR_NULL(segs)) {
103336 kfree_skb(skb);
103337 return -ENOMEM;
103338 }
103339@@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103340 struct sk_buff *nskb;
103341 struct sock *sk;
103342 struct inet_sock *inet;
103343+ int err;
103344
103345 if (ip_options_echo(&replyopts.opt.opt, skb))
103346 return;
103347@@ -1572,8 +1573,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103348 sock_net_set(sk, net);
103349 __skb_queue_head_init(&sk->sk_write_queue);
103350 sk->sk_sndbuf = sysctl_wmem_default;
103351- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
103352- &ipc, &rt, MSG_DONTWAIT);
103353+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
103354+ len, 0, &ipc, &rt, MSG_DONTWAIT);
103355+ if (unlikely(err)) {
103356+ ip_flush_pending_frames(sk);
103357+ goto out;
103358+ }
103359+
103360 nskb = skb_peek(&sk->sk_write_queue);
103361 if (nskb) {
103362 if (arg->csumoffset >= 0)
103363@@ -1585,7 +1591,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103364 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
103365 ip_push_pending_frames(sk, &fl4);
103366 }
103367-
103368+out:
103369 put_cpu_var(unicast_sock);
103370
103371 ip_rt_put(rt);
103372diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
103373index 5cb830c..81a7a56 100644
103374--- a/net/ipv4/ip_sockglue.c
103375+++ b/net/ipv4/ip_sockglue.c
103376@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103377 len = min_t(unsigned int, len, opt->optlen);
103378 if (put_user(len, optlen))
103379 return -EFAULT;
103380- if (copy_to_user(optval, opt->__data, len))
103381+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
103382+ copy_to_user(optval, opt->__data, len))
103383 return -EFAULT;
103384 return 0;
103385 }
103386@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103387 if (sk->sk_type != SOCK_STREAM)
103388 return -ENOPROTOOPT;
103389
103390- msg.msg_control = (__force void *) optval;
103391+ msg.msg_control = (__force_kernel void *) optval;
103392 msg.msg_controllen = len;
103393 msg.msg_flags = flags;
103394
103395diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
103396index f4c987b..88c386c 100644
103397--- a/net/ipv4/ip_tunnel_core.c
103398+++ b/net/ipv4/ip_tunnel_core.c
103399@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
103400 skb_pull_rcsum(skb, hdr_len);
103401
103402 if (inner_proto == htons(ETH_P_TEB)) {
103403- struct ethhdr *eh = (struct ethhdr *)skb->data;
103404+ struct ethhdr *eh;
103405
103406 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
103407 return -ENOMEM;
103408
103409+ eh = (struct ethhdr *)skb->data;
103410 if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
103411 skb->protocol = eh->h_proto;
103412 else
103413diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
103414index e453cb7..3c8d952 100644
103415--- a/net/ipv4/ip_vti.c
103416+++ b/net/ipv4/ip_vti.c
103417@@ -45,7 +45,7 @@
103418 #include <net/net_namespace.h>
103419 #include <net/netns/generic.h>
103420
103421-static struct rtnl_link_ops vti_link_ops __read_mostly;
103422+static struct rtnl_link_ops vti_link_ops;
103423
103424 static int vti_net_id __read_mostly;
103425 static int vti_tunnel_init(struct net_device *dev);
103426@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
103427 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
103428 };
103429
103430-static struct rtnl_link_ops vti_link_ops __read_mostly = {
103431+static struct rtnl_link_ops vti_link_ops = {
103432 .kind = "vti",
103433 .maxtype = IFLA_VTI_MAX,
103434 .policy = vti_policy,
103435diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
103436index 5bbef4f..5bc4fb6 100644
103437--- a/net/ipv4/ipconfig.c
103438+++ b/net/ipv4/ipconfig.c
103439@@ -332,7 +332,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
103440
103441 mm_segment_t oldfs = get_fs();
103442 set_fs(get_ds());
103443- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103444+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103445 set_fs(oldfs);
103446 return res;
103447 }
103448@@ -343,7 +343,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
103449
103450 mm_segment_t oldfs = get_fs();
103451 set_fs(get_ds());
103452- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103453+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103454 set_fs(oldfs);
103455 return res;
103456 }
103457@@ -354,7 +354,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
103458
103459 mm_segment_t oldfs = get_fs();
103460 set_fs(get_ds());
103461- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
103462+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
103463 set_fs(oldfs);
103464 return res;
103465 }
103466diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
103467index 62eaa00..29b2dc2 100644
103468--- a/net/ipv4/ipip.c
103469+++ b/net/ipv4/ipip.c
103470@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103471 static int ipip_net_id __read_mostly;
103472
103473 static int ipip_tunnel_init(struct net_device *dev);
103474-static struct rtnl_link_ops ipip_link_ops __read_mostly;
103475+static struct rtnl_link_ops ipip_link_ops;
103476
103477 static int ipip_err(struct sk_buff *skb, u32 info)
103478 {
103479@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
103480 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
103481 };
103482
103483-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
103484+static struct rtnl_link_ops ipip_link_ops = {
103485 .kind = "ipip",
103486 .maxtype = IFLA_IPTUN_MAX,
103487 .policy = ipip_policy,
103488diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
103489index f95b6f9..2ee2097 100644
103490--- a/net/ipv4/netfilter/arp_tables.c
103491+++ b/net/ipv4/netfilter/arp_tables.c
103492@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
103493 #endif
103494
103495 static int get_info(struct net *net, void __user *user,
103496- const int *len, int compat)
103497+ int len, int compat)
103498 {
103499 char name[XT_TABLE_MAXNAMELEN];
103500 struct xt_table *t;
103501 int ret;
103502
103503- if (*len != sizeof(struct arpt_getinfo)) {
103504- duprintf("length %u != %Zu\n", *len,
103505+ if (len != sizeof(struct arpt_getinfo)) {
103506+ duprintf("length %u != %Zu\n", len,
103507 sizeof(struct arpt_getinfo));
103508 return -EINVAL;
103509 }
103510@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
103511 info.size = private->size;
103512 strcpy(info.name, name);
103513
103514- if (copy_to_user(user, &info, *len) != 0)
103515+ if (copy_to_user(user, &info, len) != 0)
103516 ret = -EFAULT;
103517 else
103518 ret = 0;
103519@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
103520
103521 switch (cmd) {
103522 case ARPT_SO_GET_INFO:
103523- ret = get_info(sock_net(sk), user, len, 1);
103524+ ret = get_info(sock_net(sk), user, *len, 1);
103525 break;
103526 case ARPT_SO_GET_ENTRIES:
103527 ret = compat_get_entries(sock_net(sk), user, len);
103528@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
103529
103530 switch (cmd) {
103531 case ARPT_SO_GET_INFO:
103532- ret = get_info(sock_net(sk), user, len, 0);
103533+ ret = get_info(sock_net(sk), user, *len, 0);
103534 break;
103535
103536 case ARPT_SO_GET_ENTRIES:
103537diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
103538index 99e810f..3711b81 100644
103539--- a/net/ipv4/netfilter/ip_tables.c
103540+++ b/net/ipv4/netfilter/ip_tables.c
103541@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
103542 #endif
103543
103544 static int get_info(struct net *net, void __user *user,
103545- const int *len, int compat)
103546+ int len, int compat)
103547 {
103548 char name[XT_TABLE_MAXNAMELEN];
103549 struct xt_table *t;
103550 int ret;
103551
103552- if (*len != sizeof(struct ipt_getinfo)) {
103553- duprintf("length %u != %zu\n", *len,
103554+ if (len != sizeof(struct ipt_getinfo)) {
103555+ duprintf("length %u != %zu\n", len,
103556 sizeof(struct ipt_getinfo));
103557 return -EINVAL;
103558 }
103559@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
103560 info.size = private->size;
103561 strcpy(info.name, name);
103562
103563- if (copy_to_user(user, &info, *len) != 0)
103564+ if (copy_to_user(user, &info, len) != 0)
103565 ret = -EFAULT;
103566 else
103567 ret = 0;
103568@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103569
103570 switch (cmd) {
103571 case IPT_SO_GET_INFO:
103572- ret = get_info(sock_net(sk), user, len, 1);
103573+ ret = get_info(sock_net(sk), user, *len, 1);
103574 break;
103575 case IPT_SO_GET_ENTRIES:
103576 ret = compat_get_entries(sock_net(sk), user, len);
103577@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103578
103579 switch (cmd) {
103580 case IPT_SO_GET_INFO:
103581- ret = get_info(sock_net(sk), user, len, 0);
103582+ ret = get_info(sock_net(sk), user, *len, 0);
103583 break;
103584
103585 case IPT_SO_GET_ENTRIES:
103586diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103587index 2510c02..cfb34fa 100644
103588--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
103589+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103590@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
103591 spin_lock_init(&cn->lock);
103592
103593 #ifdef CONFIG_PROC_FS
103594- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
103595+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
103596 if (!cn->procdir) {
103597 pr_err("Unable to proc dir entry\n");
103598 return -ENOMEM;
103599diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
103600index a3c59a0..ec620a50 100644
103601--- a/net/ipv4/ping.c
103602+++ b/net/ipv4/ping.c
103603@@ -59,7 +59,7 @@ struct ping_table {
103604 };
103605
103606 static struct ping_table ping_table;
103607-struct pingv6_ops pingv6_ops;
103608+struct pingv6_ops *pingv6_ops;
103609 EXPORT_SYMBOL_GPL(pingv6_ops);
103610
103611 static u16 ping_port_rover;
103612@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
103613 return -ENODEV;
103614 }
103615 }
103616- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
103617+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
103618 scoped);
103619 rcu_read_unlock();
103620
103621@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103622 }
103623 #if IS_ENABLED(CONFIG_IPV6)
103624 } else if (skb->protocol == htons(ETH_P_IPV6)) {
103625- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
103626+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
103627 #endif
103628 }
103629
103630@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103631 info, (u8 *)icmph);
103632 #if IS_ENABLED(CONFIG_IPV6)
103633 } else if (family == AF_INET6) {
103634- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
103635+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
103636 info, (u8 *)icmph);
103637 #endif
103638 }
103639@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103640 return ip_recv_error(sk, msg, len, addr_len);
103641 #if IS_ENABLED(CONFIG_IPV6)
103642 } else if (family == AF_INET6) {
103643- return pingv6_ops.ipv6_recv_error(sk, msg, len,
103644+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
103645 addr_len);
103646 #endif
103647 }
103648@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103649 }
103650
103651 if (inet6_sk(sk)->rxopt.all)
103652- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
103653+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
103654 if (skb->protocol == htons(ETH_P_IPV6) &&
103655 inet6_sk(sk)->rxopt.all)
103656- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
103657+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
103658 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
103659 ip_cmsg_recv(msg, skb);
103660 #endif
103661@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103662 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103663 0, sock_i_ino(sp),
103664 atomic_read(&sp->sk_refcnt), sp,
103665- atomic_read(&sp->sk_drops));
103666+ atomic_read_unchecked(&sp->sk_drops));
103667 }
103668
103669 static int ping_v4_seq_show(struct seq_file *seq, void *v)
103670diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103671index 739db31..74f0210 100644
103672--- a/net/ipv4/raw.c
103673+++ b/net/ipv4/raw.c
103674@@ -314,7 +314,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103675 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103676 {
103677 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103678- atomic_inc(&sk->sk_drops);
103679+ atomic_inc_unchecked(&sk->sk_drops);
103680 kfree_skb(skb);
103681 return NET_RX_DROP;
103682 }
103683@@ -755,16 +755,20 @@ static int raw_init(struct sock *sk)
103684
103685 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103686 {
103687+ struct icmp_filter filter;
103688+
103689 if (optlen > sizeof(struct icmp_filter))
103690 optlen = sizeof(struct icmp_filter);
103691- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103692+ if (copy_from_user(&filter, optval, optlen))
103693 return -EFAULT;
103694+ raw_sk(sk)->filter = filter;
103695 return 0;
103696 }
103697
103698 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103699 {
103700 int len, ret = -EFAULT;
103701+ struct icmp_filter filter;
103702
103703 if (get_user(len, optlen))
103704 goto out;
103705@@ -774,8 +778,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103706 if (len > sizeof(struct icmp_filter))
103707 len = sizeof(struct icmp_filter);
103708 ret = -EFAULT;
103709- if (put_user(len, optlen) ||
103710- copy_to_user(optval, &raw_sk(sk)->filter, len))
103711+ filter = raw_sk(sk)->filter;
103712+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103713 goto out;
103714 ret = 0;
103715 out: return ret;
103716@@ -1004,7 +1008,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103717 0, 0L, 0,
103718 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103719 0, sock_i_ino(sp),
103720- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103721+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103722 }
103723
103724 static int raw_seq_show(struct seq_file *seq, void *v)
103725diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103726index cbadb94..691f99e 100644
103727--- a/net/ipv4/route.c
103728+++ b/net/ipv4/route.c
103729@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103730
103731 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103732 {
103733- return seq_open(file, &rt_cache_seq_ops);
103734+ return seq_open_restrict(file, &rt_cache_seq_ops);
103735 }
103736
103737 static const struct file_operations rt_cache_seq_fops = {
103738@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103739
103740 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103741 {
103742- return seq_open(file, &rt_cpu_seq_ops);
103743+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103744 }
103745
103746 static const struct file_operations rt_cpu_seq_fops = {
103747@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103748
103749 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103750 {
103751- return single_open(file, rt_acct_proc_show, NULL);
103752+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103753 }
103754
103755 static const struct file_operations rt_acct_proc_fops = {
103756@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103757
103758 #define IP_IDENTS_SZ 2048u
103759 struct ip_ident_bucket {
103760- atomic_t id;
103761+ atomic_unchecked_t id;
103762 u32 stamp32;
103763 };
103764
103765-static struct ip_ident_bucket *ip_idents __read_mostly;
103766+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103767
103768 /* In order to protect privacy, we add a perturbation to identifiers
103769 * if one generator is seldom used. This makes hard for an attacker
103770@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103771 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103772 delta = prandom_u32_max(now - old);
103773
103774- return atomic_add_return(segs + delta, &bucket->id) - segs;
103775+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103776 }
103777 EXPORT_SYMBOL(ip_idents_reserve);
103778
103779@@ -2623,34 +2623,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103780 .maxlen = sizeof(int),
103781 .mode = 0200,
103782 .proc_handler = ipv4_sysctl_rtcache_flush,
103783+ .extra1 = &init_net,
103784 },
103785 { },
103786 };
103787
103788 static __net_init int sysctl_route_net_init(struct net *net)
103789 {
103790- struct ctl_table *tbl;
103791+ ctl_table_no_const *tbl = NULL;
103792
103793- tbl = ipv4_route_flush_table;
103794 if (!net_eq(net, &init_net)) {
103795- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103796+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103797 if (tbl == NULL)
103798 goto err_dup;
103799
103800 /* Don't export sysctls to unprivileged users */
103801 if (net->user_ns != &init_user_ns)
103802 tbl[0].procname = NULL;
103803- }
103804- tbl[0].extra1 = net;
103805+ tbl[0].extra1 = net;
103806+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103807+ } else
103808+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103809
103810- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103811 if (net->ipv4.route_hdr == NULL)
103812 goto err_reg;
103813 return 0;
103814
103815 err_reg:
103816- if (tbl != ipv4_route_flush_table)
103817- kfree(tbl);
103818+ kfree(tbl);
103819 err_dup:
103820 return -ENOMEM;
103821 }
103822@@ -2673,8 +2673,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103823
103824 static __net_init int rt_genid_init(struct net *net)
103825 {
103826- atomic_set(&net->ipv4.rt_genid, 0);
103827- atomic_set(&net->fnhe_genid, 0);
103828+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103829+ atomic_set_unchecked(&net->fnhe_genid, 0);
103830 get_random_bytes(&net->ipv4.dev_addr_genid,
103831 sizeof(net->ipv4.dev_addr_genid));
103832 return 0;
103833@@ -2717,11 +2717,7 @@ int __init ip_rt_init(void)
103834 {
103835 int rc = 0;
103836
103837- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103838- if (!ip_idents)
103839- panic("IP: failed to allocate ip_idents\n");
103840-
103841- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103842+ prandom_bytes(ip_idents, sizeof(ip_idents));
103843
103844 #ifdef CONFIG_IP_ROUTE_CLASSID
103845 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
103846diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103847index 79a007c..5023029 100644
103848--- a/net/ipv4/sysctl_net_ipv4.c
103849+++ b/net/ipv4/sysctl_net_ipv4.c
103850@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103851 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103852 int ret;
103853 int range[2];
103854- struct ctl_table tmp = {
103855+ ctl_table_no_const tmp = {
103856 .data = &range,
103857 .maxlen = sizeof(range),
103858 .mode = table->mode,
103859@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103860 int ret;
103861 gid_t urange[2];
103862 kgid_t low, high;
103863- struct ctl_table tmp = {
103864+ ctl_table_no_const tmp = {
103865 .data = &urange,
103866 .maxlen = sizeof(urange),
103867 .mode = table->mode,
103868@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103869 void __user *buffer, size_t *lenp, loff_t *ppos)
103870 {
103871 char val[TCP_CA_NAME_MAX];
103872- struct ctl_table tbl = {
103873+ ctl_table_no_const tbl = {
103874 .data = val,
103875 .maxlen = TCP_CA_NAME_MAX,
103876 };
103877@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103878 void __user *buffer, size_t *lenp,
103879 loff_t *ppos)
103880 {
103881- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103882+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103883 int ret;
103884
103885 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103886@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103887 void __user *buffer, size_t *lenp,
103888 loff_t *ppos)
103889 {
103890- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103891+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103892 int ret;
103893
103894 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103895@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103896 void __user *buffer, size_t *lenp,
103897 loff_t *ppos)
103898 {
103899- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103900+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103901 struct tcp_fastopen_context *ctxt;
103902 int ret;
103903 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103904@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
103905
103906 static __net_init int ipv4_sysctl_init_net(struct net *net)
103907 {
103908- struct ctl_table *table;
103909+ ctl_table_no_const *table = NULL;
103910
103911- table = ipv4_net_table;
103912 if (!net_eq(net, &init_net)) {
103913 int i;
103914
103915- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103916+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103917 if (table == NULL)
103918 goto err_alloc;
103919
103920@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103921 table[i].data += (void *)net - (void *)&init_net;
103922 }
103923
103924- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103925+ if (!net_eq(net, &init_net))
103926+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103927+ else
103928+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103929 if (net->ipv4.ipv4_hdr == NULL)
103930 goto err_reg;
103931
103932diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103933index a906e02..f3b6a0f 100644
103934--- a/net/ipv4/tcp_input.c
103935+++ b/net/ipv4/tcp_input.c
103936@@ -755,7 +755,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103937 * without any lock. We want to make sure compiler wont store
103938 * intermediate values in this location.
103939 */
103940- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103941+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103942 sk->sk_max_pacing_rate);
103943 }
103944
103945@@ -4488,7 +4488,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103946 * simplifies code)
103947 */
103948 static void
103949-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103950+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103951 struct sk_buff *head, struct sk_buff *tail,
103952 u32 start, u32 end)
103953 {
103954@@ -5546,6 +5546,7 @@ discard:
103955 tcp_paws_reject(&tp->rx_opt, 0))
103956 goto discard_and_undo;
103957
103958+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103959 if (th->syn) {
103960 /* We see SYN without ACK. It is attempt of
103961 * simultaneous connect with crossed SYNs.
103962@@ -5596,6 +5597,7 @@ discard:
103963 goto discard;
103964 #endif
103965 }
103966+#endif
103967 /* "fifth, if neither of the SYN or RST bits is set then
103968 * drop the segment and return."
103969 */
103970@@ -5642,7 +5644,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103971 goto discard;
103972
103973 if (th->syn) {
103974- if (th->fin)
103975+ if (th->fin || th->urg || th->psh)
103976 goto discard;
103977 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103978 return 1;
103979diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103980index cd17f00..1e1f252 100644
103981--- a/net/ipv4/tcp_ipv4.c
103982+++ b/net/ipv4/tcp_ipv4.c
103983@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
103984 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103985
103986
103987+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103988+extern int grsec_enable_blackhole;
103989+#endif
103990+
103991 #ifdef CONFIG_TCP_MD5SIG
103992 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103993 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103994@@ -1487,6 +1491,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103995 return 0;
103996
103997 reset:
103998+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103999+ if (!grsec_enable_blackhole)
104000+#endif
104001 tcp_v4_send_reset(rsk, skb);
104002 discard:
104003 kfree_skb(skb);
104004@@ -1633,12 +1640,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
104005 TCP_SKB_CB(skb)->sacked = 0;
104006
104007 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104008- if (!sk)
104009+ if (!sk) {
104010+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104011+ ret = 1;
104012+#endif
104013 goto no_tcp_socket;
104014-
104015+ }
104016 process:
104017- if (sk->sk_state == TCP_TIME_WAIT)
104018+ if (sk->sk_state == TCP_TIME_WAIT) {
104019+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104020+ ret = 2;
104021+#endif
104022 goto do_time_wait;
104023+ }
104024
104025 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
104026 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104027@@ -1704,6 +1718,10 @@ csum_error:
104028 bad_packet:
104029 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104030 } else {
104031+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104032+ if (!grsec_enable_blackhole || (ret == 1 &&
104033+ (skb->dev->flags & IFF_LOOPBACK)))
104034+#endif
104035 tcp_v4_send_reset(NULL, skb);
104036 }
104037
104038diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
104039index 1649988..6251843 100644
104040--- a/net/ipv4/tcp_minisocks.c
104041+++ b/net/ipv4/tcp_minisocks.c
104042@@ -27,6 +27,10 @@
104043 #include <net/inet_common.h>
104044 #include <net/xfrm.h>
104045
104046+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104047+extern int grsec_enable_blackhole;
104048+#endif
104049+
104050 int sysctl_tcp_syncookies __read_mostly = 1;
104051 EXPORT_SYMBOL(sysctl_tcp_syncookies);
104052
104053@@ -740,7 +744,10 @@ embryonic_reset:
104054 * avoid becoming vulnerable to outside attack aiming at
104055 * resetting legit local connections.
104056 */
104057- req->rsk_ops->send_reset(sk, skb);
104058+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104059+ if (!grsec_enable_blackhole)
104060+#endif
104061+ req->rsk_ops->send_reset(sk, skb);
104062 } else if (fastopen) { /* received a valid RST pkt */
104063 reqsk_fastopen_remove(sk, req, true);
104064 tcp_reset(sk);
104065diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
104066index 3b66610..bfbe23a 100644
104067--- a/net/ipv4/tcp_probe.c
104068+++ b/net/ipv4/tcp_probe.c
104069@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
104070 if (cnt + width >= len)
104071 break;
104072
104073- if (copy_to_user(buf + cnt, tbuf, width))
104074+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
104075 return -EFAULT;
104076 cnt += width;
104077 }
104078diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
104079index df90cd1..9ab2c9b 100644
104080--- a/net/ipv4/tcp_timer.c
104081+++ b/net/ipv4/tcp_timer.c
104082@@ -22,6 +22,10 @@
104083 #include <linux/gfp.h>
104084 #include <net/tcp.h>
104085
104086+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104087+extern int grsec_lastack_retries;
104088+#endif
104089+
104090 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
104091 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
104092 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
104093@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
104094 }
104095 }
104096
104097+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104098+ if ((sk->sk_state == TCP_LAST_ACK) &&
104099+ (grsec_lastack_retries > 0) &&
104100+ (grsec_lastack_retries < retry_until))
104101+ retry_until = grsec_lastack_retries;
104102+#endif
104103+
104104 if (retransmits_timed_out(sk, retry_until,
104105 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
104106 /* Has it gone just too far? */
104107diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
104108index f57c0e4..cf24bd0 100644
104109--- a/net/ipv4/udp.c
104110+++ b/net/ipv4/udp.c
104111@@ -87,6 +87,7 @@
104112 #include <linux/types.h>
104113 #include <linux/fcntl.h>
104114 #include <linux/module.h>
104115+#include <linux/security.h>
104116 #include <linux/socket.h>
104117 #include <linux/sockios.h>
104118 #include <linux/igmp.h>
104119@@ -113,6 +114,10 @@
104120 #include <net/busy_poll.h>
104121 #include "udp_impl.h"
104122
104123+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104124+extern int grsec_enable_blackhole;
104125+#endif
104126+
104127 struct udp_table udp_table __read_mostly;
104128 EXPORT_SYMBOL(udp_table);
104129
104130@@ -594,6 +599,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
104131 return true;
104132 }
104133
104134+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
104135+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
104136+
104137 /*
104138 * This routine is called by the ICMP module when it gets some
104139 * sort of error condition. If err < 0 then the socket should
104140@@ -931,9 +939,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104141 dport = usin->sin_port;
104142 if (dport == 0)
104143 return -EINVAL;
104144+
104145+ err = gr_search_udp_sendmsg(sk, usin);
104146+ if (err)
104147+ return err;
104148 } else {
104149 if (sk->sk_state != TCP_ESTABLISHED)
104150 return -EDESTADDRREQ;
104151+
104152+ err = gr_search_udp_sendmsg(sk, NULL);
104153+ if (err)
104154+ return err;
104155+
104156 daddr = inet->inet_daddr;
104157 dport = inet->inet_dport;
104158 /* Open fast path for connected socket.
104159@@ -1181,7 +1198,7 @@ static unsigned int first_packet_length(struct sock *sk)
104160 IS_UDPLITE(sk));
104161 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104162 IS_UDPLITE(sk));
104163- atomic_inc(&sk->sk_drops);
104164+ atomic_inc_unchecked(&sk->sk_drops);
104165 __skb_unlink(skb, rcvq);
104166 __skb_queue_tail(&list_kill, skb);
104167 }
104168@@ -1261,6 +1278,10 @@ try_again:
104169 if (!skb)
104170 goto out;
104171
104172+ err = gr_search_udp_recvmsg(sk, skb);
104173+ if (err)
104174+ goto out_free;
104175+
104176 ulen = skb->len - sizeof(struct udphdr);
104177 copied = len;
104178 if (copied > ulen)
104179@@ -1294,7 +1315,7 @@ try_again:
104180 if (unlikely(err)) {
104181 trace_kfree_skb(skb, udp_recvmsg);
104182 if (!peeked) {
104183- atomic_inc(&sk->sk_drops);
104184+ atomic_inc_unchecked(&sk->sk_drops);
104185 UDP_INC_STATS_USER(sock_net(sk),
104186 UDP_MIB_INERRORS, is_udplite);
104187 }
104188@@ -1591,7 +1612,7 @@ csum_error:
104189 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104190 drop:
104191 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104192- atomic_inc(&sk->sk_drops);
104193+ atomic_inc_unchecked(&sk->sk_drops);
104194 kfree_skb(skb);
104195 return -1;
104196 }
104197@@ -1610,7 +1631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104198 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104199
104200 if (!skb1) {
104201- atomic_inc(&sk->sk_drops);
104202+ atomic_inc_unchecked(&sk->sk_drops);
104203 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104204 IS_UDPLITE(sk));
104205 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104206@@ -1807,6 +1828,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104207 goto csum_error;
104208
104209 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104210+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104211+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104212+#endif
104213 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
104214
104215 /*
104216@@ -2393,7 +2417,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
104217 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104218 0, sock_i_ino(sp),
104219 atomic_read(&sp->sk_refcnt), sp,
104220- atomic_read(&sp->sk_drops));
104221+ atomic_read_unchecked(&sp->sk_drops));
104222 }
104223
104224 int udp4_seq_show(struct seq_file *seq, void *v)
104225diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
104226index 6156f68..d6ab46d 100644
104227--- a/net/ipv4/xfrm4_policy.c
104228+++ b/net/ipv4/xfrm4_policy.c
104229@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104230 fl4->flowi4_tos = iph->tos;
104231 }
104232
104233-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
104234+static int xfrm4_garbage_collect(struct dst_ops *ops)
104235 {
104236 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
104237
104238- xfrm4_policy_afinfo.garbage_collect(net);
104239+ xfrm_garbage_collect_deferred(net);
104240 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
104241 }
104242
104243@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
104244
104245 static int __net_init xfrm4_net_init(struct net *net)
104246 {
104247- struct ctl_table *table;
104248+ ctl_table_no_const *table = NULL;
104249 struct ctl_table_header *hdr;
104250
104251- table = xfrm4_policy_table;
104252 if (!net_eq(net, &init_net)) {
104253- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104254+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104255 if (!table)
104256 goto err_alloc;
104257
104258 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
104259- }
104260-
104261- hdr = register_net_sysctl(net, "net/ipv4", table);
104262+ hdr = register_net_sysctl(net, "net/ipv4", table);
104263+ } else
104264+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
104265 if (!hdr)
104266 goto err_reg;
104267
104268@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
104269 return 0;
104270
104271 err_reg:
104272- if (!net_eq(net, &init_net))
104273- kfree(table);
104274+ kfree(table);
104275 err_alloc:
104276 return -ENOMEM;
104277 }
104278diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
104279index 3e118df..27b16cf 100644
104280--- a/net/ipv6/addrconf.c
104281+++ b/net/ipv6/addrconf.c
104282@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
104283 idx = 0;
104284 head = &net->dev_index_head[h];
104285 rcu_read_lock();
104286- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
104287+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
104288 net->dev_base_seq;
104289 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104290 if (idx < s_idx)
104291@@ -2396,7 +2396,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
104292 p.iph.ihl = 5;
104293 p.iph.protocol = IPPROTO_IPV6;
104294 p.iph.ttl = 64;
104295- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
104296+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
104297
104298 if (ops->ndo_do_ioctl) {
104299 mm_segment_t oldfs = get_fs();
104300@@ -3531,16 +3531,23 @@ static const struct file_operations if6_fops = {
104301 .release = seq_release_net,
104302 };
104303
104304+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
104305+extern void unregister_ipv6_seq_ops_addr(void);
104306+
104307 static int __net_init if6_proc_net_init(struct net *net)
104308 {
104309- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
104310+ register_ipv6_seq_ops_addr(&if6_seq_ops);
104311+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
104312+ unregister_ipv6_seq_ops_addr();
104313 return -ENOMEM;
104314+ }
104315 return 0;
104316 }
104317
104318 static void __net_exit if6_proc_net_exit(struct net *net)
104319 {
104320 remove_proc_entry("if_inet6", net->proc_net);
104321+ unregister_ipv6_seq_ops_addr();
104322 }
104323
104324 static struct pernet_operations if6_proc_net_ops = {
104325@@ -4156,7 +4163,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
104326 s_ip_idx = ip_idx = cb->args[2];
104327
104328 rcu_read_lock();
104329- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104330+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104331 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
104332 idx = 0;
104333 head = &net->dev_index_head[h];
104334@@ -4784,7 +4791,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104335 rt_genid_bump_ipv6(net);
104336 break;
104337 }
104338- atomic_inc(&net->ipv6.dev_addr_genid);
104339+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
104340 }
104341
104342 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104343@@ -4804,7 +4811,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
104344 int *valp = ctl->data;
104345 int val = *valp;
104346 loff_t pos = *ppos;
104347- struct ctl_table lctl;
104348+ ctl_table_no_const lctl;
104349 int ret;
104350
104351 /*
104352@@ -4889,7 +4896,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
104353 int *valp = ctl->data;
104354 int val = *valp;
104355 loff_t pos = *ppos;
104356- struct ctl_table lctl;
104357+ ctl_table_no_const lctl;
104358 int ret;
104359
104360 /*
104361diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
104362index 2daa3a1..341066c 100644
104363--- a/net/ipv6/af_inet6.c
104364+++ b/net/ipv6/af_inet6.c
104365@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
104366 net->ipv6.sysctl.icmpv6_time = 1*HZ;
104367 net->ipv6.sysctl.flowlabel_consistency = 1;
104368 net->ipv6.sysctl.auto_flowlabels = 0;
104369- atomic_set(&net->ipv6.rt_genid, 0);
104370+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
104371
104372 err = ipv6_init_mibs(net);
104373 if (err)
104374diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
104375index 2753319..b7e625c 100644
104376--- a/net/ipv6/datagram.c
104377+++ b/net/ipv6/datagram.c
104378@@ -939,5 +939,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
104379 0,
104380 sock_i_ino(sp),
104381 atomic_read(&sp->sk_refcnt), sp,
104382- atomic_read(&sp->sk_drops));
104383+ atomic_read_unchecked(&sp->sk_drops));
104384 }
104385diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
104386index 06ba3e5..5c08d38 100644
104387--- a/net/ipv6/icmp.c
104388+++ b/net/ipv6/icmp.c
104389@@ -993,7 +993,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
104390
104391 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
104392 {
104393- struct ctl_table *table;
104394+ ctl_table_no_const *table;
104395
104396 table = kmemdup(ipv6_icmp_table_template,
104397 sizeof(ipv6_icmp_table_template),
104398diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
104399index 97299d7..c8e6894 100644
104400--- a/net/ipv6/ip6_gre.c
104401+++ b/net/ipv6/ip6_gre.c
104402@@ -71,8 +71,8 @@ struct ip6gre_net {
104403 struct net_device *fb_tunnel_dev;
104404 };
104405
104406-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
104407-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
104408+static struct rtnl_link_ops ip6gre_link_ops;
104409+static struct rtnl_link_ops ip6gre_tap_ops;
104410 static int ip6gre_tunnel_init(struct net_device *dev);
104411 static void ip6gre_tunnel_setup(struct net_device *dev);
104412 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
104413@@ -1286,7 +1286,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
104414 }
104415
104416
104417-static struct inet6_protocol ip6gre_protocol __read_mostly = {
104418+static struct inet6_protocol ip6gre_protocol = {
104419 .handler = ip6gre_rcv,
104420 .err_handler = ip6gre_err,
104421 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
104422@@ -1645,7 +1645,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
104423 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
104424 };
104425
104426-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104427+static struct rtnl_link_ops ip6gre_link_ops = {
104428 .kind = "ip6gre",
104429 .maxtype = IFLA_GRE_MAX,
104430 .policy = ip6gre_policy,
104431@@ -1659,7 +1659,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104432 .fill_info = ip6gre_fill_info,
104433 };
104434
104435-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
104436+static struct rtnl_link_ops ip6gre_tap_ops = {
104437 .kind = "ip6gretap",
104438 .maxtype = IFLA_GRE_MAX,
104439 .policy = ip6gre_policy,
104440diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
104441index 65eda2a..620a102 100644
104442--- a/net/ipv6/ip6_offload.c
104443+++ b/net/ipv6/ip6_offload.c
104444@@ -46,6 +46,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
104445 if (unlikely(!pskb_may_pull(skb, len)))
104446 break;
104447
104448+ opth = (void *)skb->data;
104449 proto = opth->nexthdr;
104450 __skb_pull(skb, len);
104451 }
104452diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
104453index 69a84b4..881c319 100644
104454--- a/net/ipv6/ip6_tunnel.c
104455+++ b/net/ipv6/ip6_tunnel.c
104456@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104457
104458 static int ip6_tnl_dev_init(struct net_device *dev);
104459 static void ip6_tnl_dev_setup(struct net_device *dev);
104460-static struct rtnl_link_ops ip6_link_ops __read_mostly;
104461+static struct rtnl_link_ops ip6_link_ops;
104462
104463 static int ip6_tnl_net_id __read_mostly;
104464 struct ip6_tnl_net {
104465@@ -1714,7 +1714,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
104466 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
104467 };
104468
104469-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
104470+static struct rtnl_link_ops ip6_link_ops = {
104471 .kind = "ip6tnl",
104472 .maxtype = IFLA_IPTUN_MAX,
104473 .policy = ip6_tnl_policy,
104474diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
104475index 5833a22..6631377 100644
104476--- a/net/ipv6/ip6_vti.c
104477+++ b/net/ipv6/ip6_vti.c
104478@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104479
104480 static int vti6_dev_init(struct net_device *dev);
104481 static void vti6_dev_setup(struct net_device *dev);
104482-static struct rtnl_link_ops vti6_link_ops __read_mostly;
104483+static struct rtnl_link_ops vti6_link_ops;
104484
104485 static int vti6_net_id __read_mostly;
104486 struct vti6_net {
104487@@ -981,7 +981,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
104488 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
104489 };
104490
104491-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
104492+static struct rtnl_link_ops vti6_link_ops = {
104493 .kind = "vti6",
104494 .maxtype = IFLA_VTI_MAX,
104495 .policy = vti6_policy,
104496diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
104497index 0c28998..d0a2ecd 100644
104498--- a/net/ipv6/ipv6_sockglue.c
104499+++ b/net/ipv6/ipv6_sockglue.c
104500@@ -995,7 +995,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
104501 if (sk->sk_type != SOCK_STREAM)
104502 return -ENOPROTOOPT;
104503
104504- msg.msg_control = optval;
104505+ msg.msg_control = (void __force_kernel *)optval;
104506 msg.msg_controllen = len;
104507 msg.msg_flags = flags;
104508
104509diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
104510index e080fbb..412b3cf 100644
104511--- a/net/ipv6/netfilter/ip6_tables.c
104512+++ b/net/ipv6/netfilter/ip6_tables.c
104513@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
104514 #endif
104515
104516 static int get_info(struct net *net, void __user *user,
104517- const int *len, int compat)
104518+ int len, int compat)
104519 {
104520 char name[XT_TABLE_MAXNAMELEN];
104521 struct xt_table *t;
104522 int ret;
104523
104524- if (*len != sizeof(struct ip6t_getinfo)) {
104525- duprintf("length %u != %zu\n", *len,
104526+ if (len != sizeof(struct ip6t_getinfo)) {
104527+ duprintf("length %u != %zu\n", len,
104528 sizeof(struct ip6t_getinfo));
104529 return -EINVAL;
104530 }
104531@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
104532 info.size = private->size;
104533 strcpy(info.name, name);
104534
104535- if (copy_to_user(user, &info, *len) != 0)
104536+ if (copy_to_user(user, &info, len) != 0)
104537 ret = -EFAULT;
104538 else
104539 ret = 0;
104540@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104541
104542 switch (cmd) {
104543 case IP6T_SO_GET_INFO:
104544- ret = get_info(sock_net(sk), user, len, 1);
104545+ ret = get_info(sock_net(sk), user, *len, 1);
104546 break;
104547 case IP6T_SO_GET_ENTRIES:
104548 ret = compat_get_entries(sock_net(sk), user, len);
104549@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104550
104551 switch (cmd) {
104552 case IP6T_SO_GET_INFO:
104553- ret = get_info(sock_net(sk), user, len, 0);
104554+ ret = get_info(sock_net(sk), user, *len, 0);
104555 break;
104556
104557 case IP6T_SO_GET_ENTRIES:
104558diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
104559index 6f187c8..34b367f 100644
104560--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
104561+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
104562@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
104563
104564 static int nf_ct_frag6_sysctl_register(struct net *net)
104565 {
104566- struct ctl_table *table;
104567+ ctl_table_no_const *table = NULL;
104568 struct ctl_table_header *hdr;
104569
104570- table = nf_ct_frag6_sysctl_table;
104571 if (!net_eq(net, &init_net)) {
104572- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
104573+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
104574 GFP_KERNEL);
104575 if (table == NULL)
104576 goto err_alloc;
104577@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104578 table[2].data = &net->nf_frag.frags.high_thresh;
104579 table[2].extra1 = &net->nf_frag.frags.low_thresh;
104580 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
104581- }
104582-
104583- hdr = register_net_sysctl(net, "net/netfilter", table);
104584+ hdr = register_net_sysctl(net, "net/netfilter", table);
104585+ } else
104586+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
104587 if (hdr == NULL)
104588 goto err_reg;
104589
104590@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104591 return 0;
104592
104593 err_reg:
104594- if (!net_eq(net, &init_net))
104595- kfree(table);
104596+ kfree(table);
104597 err_alloc:
104598 return -ENOMEM;
104599 }
104600diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
104601index 5b7a1ed..d9da205 100644
104602--- a/net/ipv6/ping.c
104603+++ b/net/ipv6/ping.c
104604@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
104605 };
104606 #endif
104607
104608+static struct pingv6_ops real_pingv6_ops = {
104609+ .ipv6_recv_error = ipv6_recv_error,
104610+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
104611+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
104612+ .icmpv6_err_convert = icmpv6_err_convert,
104613+ .ipv6_icmp_error = ipv6_icmp_error,
104614+ .ipv6_chk_addr = ipv6_chk_addr,
104615+};
104616+
104617+static struct pingv6_ops dummy_pingv6_ops = {
104618+ .ipv6_recv_error = dummy_ipv6_recv_error,
104619+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
104620+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
104621+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
104622+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
104623+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
104624+};
104625+
104626 int __init pingv6_init(void)
104627 {
104628 #ifdef CONFIG_PROC_FS
104629@@ -247,13 +265,7 @@ int __init pingv6_init(void)
104630 if (ret)
104631 return ret;
104632 #endif
104633- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104634- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104635- pingv6_ops.ip6_datagram_recv_specific_ctl =
104636- ip6_datagram_recv_specific_ctl;
104637- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104638- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104639- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104640+ pingv6_ops = &real_pingv6_ops;
104641 return inet6_register_protosw(&pingv6_protosw);
104642 }
104643
104644@@ -262,14 +274,9 @@ int __init pingv6_init(void)
104645 */
104646 void pingv6_exit(void)
104647 {
104648- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104649- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104650- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104651- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104652- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104653- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104654 #ifdef CONFIG_PROC_FS
104655 unregister_pernet_subsys(&ping_v6_net_ops);
104656 #endif
104657+ pingv6_ops = &dummy_pingv6_ops;
104658 inet6_unregister_protosw(&pingv6_protosw);
104659 }
104660diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104661index 2d6f860..b0165f5 100644
104662--- a/net/ipv6/proc.c
104663+++ b/net/ipv6/proc.c
104664@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104665 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104666 goto proc_snmp6_fail;
104667
104668- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104669+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104670 if (!net->mib.proc_net_devsnmp6)
104671 goto proc_dev_snmp6_fail;
104672 return 0;
104673diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104674index 39d4422..b0979547 100644
104675--- a/net/ipv6/raw.c
104676+++ b/net/ipv6/raw.c
104677@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104678 {
104679 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104680 skb_checksum_complete(skb)) {
104681- atomic_inc(&sk->sk_drops);
104682+ atomic_inc_unchecked(&sk->sk_drops);
104683 kfree_skb(skb);
104684 return NET_RX_DROP;
104685 }
104686@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104687 struct raw6_sock *rp = raw6_sk(sk);
104688
104689 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104690- atomic_inc(&sk->sk_drops);
104691+ atomic_inc_unchecked(&sk->sk_drops);
104692 kfree_skb(skb);
104693 return NET_RX_DROP;
104694 }
104695@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104696
104697 if (inet->hdrincl) {
104698 if (skb_checksum_complete(skb)) {
104699- atomic_inc(&sk->sk_drops);
104700+ atomic_inc_unchecked(&sk->sk_drops);
104701 kfree_skb(skb);
104702 return NET_RX_DROP;
104703 }
104704@@ -608,7 +608,7 @@ out:
104705 return err;
104706 }
104707
104708-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
104709+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
104710 struct flowi6 *fl6, struct dst_entry **dstp,
104711 unsigned int flags)
104712 {
104713@@ -914,12 +914,15 @@ do_confirm:
104714 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104715 char __user *optval, int optlen)
104716 {
104717+ struct icmp6_filter filter;
104718+
104719 switch (optname) {
104720 case ICMPV6_FILTER:
104721 if (optlen > sizeof(struct icmp6_filter))
104722 optlen = sizeof(struct icmp6_filter);
104723- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104724+ if (copy_from_user(&filter, optval, optlen))
104725 return -EFAULT;
104726+ raw6_sk(sk)->filter = filter;
104727 return 0;
104728 default:
104729 return -ENOPROTOOPT;
104730@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104731 char __user *optval, int __user *optlen)
104732 {
104733 int len;
104734+ struct icmp6_filter filter;
104735
104736 switch (optname) {
104737 case ICMPV6_FILTER:
104738@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104739 len = sizeof(struct icmp6_filter);
104740 if (put_user(len, optlen))
104741 return -EFAULT;
104742- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104743+ filter = raw6_sk(sk)->filter;
104744+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104745 return -EFAULT;
104746 return 0;
104747 default:
104748diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104749index c6557d9..173e728 100644
104750--- a/net/ipv6/reassembly.c
104751+++ b/net/ipv6/reassembly.c
104752@@ -627,12 +627,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104753
104754 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104755 {
104756- struct ctl_table *table;
104757+ ctl_table_no_const *table = NULL;
104758 struct ctl_table_header *hdr;
104759
104760- table = ip6_frags_ns_ctl_table;
104761 if (!net_eq(net, &init_net)) {
104762- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104763+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104764 if (table == NULL)
104765 goto err_alloc;
104766
104767@@ -646,9 +645,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104768 /* Don't export sysctls to unprivileged users */
104769 if (net->user_ns != &init_user_ns)
104770 table[0].procname = NULL;
104771- }
104772+ hdr = register_net_sysctl(net, "net/ipv6", table);
104773+ } else
104774+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104775
104776- hdr = register_net_sysctl(net, "net/ipv6", table);
104777 if (hdr == NULL)
104778 goto err_reg;
104779
104780@@ -656,8 +656,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104781 return 0;
104782
104783 err_reg:
104784- if (!net_eq(net, &init_net))
104785- kfree(table);
104786+ kfree(table);
104787 err_alloc:
104788 return -ENOMEM;
104789 }
104790diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104791index bafde82..af2c91f 100644
104792--- a/net/ipv6/route.c
104793+++ b/net/ipv6/route.c
104794@@ -2967,7 +2967,7 @@ struct ctl_table ipv6_route_table_template[] = {
104795
104796 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104797 {
104798- struct ctl_table *table;
104799+ ctl_table_no_const *table;
104800
104801 table = kmemdup(ipv6_route_table_template,
104802 sizeof(ipv6_route_table_template),
104803diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104804index 6163f85..0070823 100644
104805--- a/net/ipv6/sit.c
104806+++ b/net/ipv6/sit.c
104807@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104808 static void ipip6_dev_free(struct net_device *dev);
104809 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104810 __be32 *v4dst);
104811-static struct rtnl_link_ops sit_link_ops __read_mostly;
104812+static struct rtnl_link_ops sit_link_ops;
104813
104814 static int sit_net_id __read_mostly;
104815 struct sit_net {
104816@@ -485,11 +485,11 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
104817 */
104818 static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104819 {
104820- const struct iphdr *iph = (const struct iphdr *) skb->data;
104821+ int ihl = ((const struct iphdr *)skb->data)->ihl*4;
104822 struct rt6_info *rt;
104823 struct sk_buff *skb2;
104824
104825- if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
104826+ if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8))
104827 return 1;
104828
104829 skb2 = skb_clone(skb, GFP_ATOMIC);
104830@@ -498,7 +498,7 @@ static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104831 return 1;
104832
104833 skb_dst_drop(skb2);
104834- skb_pull(skb2, iph->ihl * 4);
104835+ skb_pull(skb2, ihl);
104836 skb_reset_network_header(skb2);
104837
104838 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
104839@@ -1662,7 +1662,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104840 unregister_netdevice_queue(dev, head);
104841 }
104842
104843-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104844+static struct rtnl_link_ops sit_link_ops = {
104845 .kind = "sit",
104846 .maxtype = IFLA_IPTUN_MAX,
104847 .policy = ipip6_policy,
104848diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104849index 0c56c93..ece50df 100644
104850--- a/net/ipv6/sysctl_net_ipv6.c
104851+++ b/net/ipv6/sysctl_net_ipv6.c
104852@@ -68,7 +68,7 @@ static struct ctl_table ipv6_rotable[] = {
104853
104854 static int __net_init ipv6_sysctl_net_init(struct net *net)
104855 {
104856- struct ctl_table *ipv6_table;
104857+ ctl_table_no_const *ipv6_table;
104858 struct ctl_table *ipv6_route_table;
104859 struct ctl_table *ipv6_icmp_table;
104860 int err;
104861diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104862index 29964c3..b8caecf 100644
104863--- a/net/ipv6/tcp_ipv6.c
104864+++ b/net/ipv6/tcp_ipv6.c
104865@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104866 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104867 }
104868
104869+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104870+extern int grsec_enable_blackhole;
104871+#endif
104872+
104873 static void tcp_v6_hash(struct sock *sk)
104874 {
104875 if (sk->sk_state != TCP_CLOSE) {
104876@@ -1333,6 +1337,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104877 return 0;
104878
104879 reset:
104880+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104881+ if (!grsec_enable_blackhole)
104882+#endif
104883 tcp_v6_send_reset(sk, skb);
104884 discard:
104885 if (opt_skb)
104886@@ -1417,12 +1424,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104887 TCP_SKB_CB(skb)->sacked = 0;
104888
104889 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104890- if (!sk)
104891+ if (!sk) {
104892+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104893+ ret = 1;
104894+#endif
104895 goto no_tcp_socket;
104896+ }
104897
104898 process:
104899- if (sk->sk_state == TCP_TIME_WAIT)
104900+ if (sk->sk_state == TCP_TIME_WAIT) {
104901+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104902+ ret = 2;
104903+#endif
104904 goto do_time_wait;
104905+ }
104906
104907 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104908 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104909@@ -1479,6 +1494,10 @@ csum_error:
104910 bad_packet:
104911 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104912 } else {
104913+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104914+ if (!grsec_enable_blackhole || (ret == 1 &&
104915+ (skb->dev->flags & IFF_LOOPBACK)))
104916+#endif
104917 tcp_v6_send_reset(NULL, skb);
104918 }
104919
104920diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104921index 4836af8..0e52bbd 100644
104922--- a/net/ipv6/udp.c
104923+++ b/net/ipv6/udp.c
104924@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104925 udp_ipv6_hash_secret + net_hash_mix(net));
104926 }
104927
104928+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104929+extern int grsec_enable_blackhole;
104930+#endif
104931+
104932 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104933 {
104934 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104935@@ -434,7 +438,7 @@ try_again:
104936 if (unlikely(err)) {
104937 trace_kfree_skb(skb, udpv6_recvmsg);
104938 if (!peeked) {
104939- atomic_inc(&sk->sk_drops);
104940+ atomic_inc_unchecked(&sk->sk_drops);
104941 if (is_udp4)
104942 UDP_INC_STATS_USER(sock_net(sk),
104943 UDP_MIB_INERRORS,
104944@@ -701,7 +705,7 @@ csum_error:
104945 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104946 drop:
104947 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104948- atomic_inc(&sk->sk_drops);
104949+ atomic_inc_unchecked(&sk->sk_drops);
104950 kfree_skb(skb);
104951 return -1;
104952 }
104953@@ -740,7 +744,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104954 if (likely(skb1 == NULL))
104955 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104956 if (!skb1) {
104957- atomic_inc(&sk->sk_drops);
104958+ atomic_inc_unchecked(&sk->sk_drops);
104959 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104960 IS_UDPLITE(sk));
104961 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104962@@ -915,6 +919,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104963 goto csum_error;
104964
104965 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104966+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104967+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104968+#endif
104969 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104970
104971 kfree_skb(skb);
104972diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104973index 2a0bbda..442240d 100644
104974--- a/net/ipv6/xfrm6_policy.c
104975+++ b/net/ipv6/xfrm6_policy.c
104976@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104977 case IPPROTO_DCCP:
104978 if (!onlyproto && (nh + offset + 4 < skb->data ||
104979 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
104980- __be16 *ports = (__be16 *)exthdr;
104981+ __be16 *ports;
104982
104983+ nh = skb_network_header(skb);
104984+ ports = (__be16 *)(nh + offset);
104985 fl6->fl6_sport = ports[!!reverse];
104986 fl6->fl6_dport = ports[!reverse];
104987 }
104988@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104989
104990 case IPPROTO_ICMPV6:
104991 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
104992- u8 *icmp = (u8 *)exthdr;
104993+ u8 *icmp;
104994
104995+ nh = skb_network_header(skb);
104996+ icmp = (u8 *)(nh + offset);
104997 fl6->fl6_icmp_type = icmp[0];
104998 fl6->fl6_icmp_code = icmp[1];
104999 }
105000@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105001 case IPPROTO_MH:
105002 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
105003 struct ip6_mh *mh;
105004- mh = (struct ip6_mh *)exthdr;
105005
105006+ nh = skb_network_header(skb);
105007+ mh = (struct ip6_mh *)(nh + offset);
105008 fl6->fl6_mh_type = mh->ip6mh_type;
105009 }
105010 fl6->flowi6_proto = nexthdr;
105011@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105012 }
105013 }
105014
105015-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
105016+static int xfrm6_garbage_collect(struct dst_ops *ops)
105017 {
105018 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
105019
105020- xfrm6_policy_afinfo.garbage_collect(net);
105021+ xfrm_garbage_collect_deferred(net);
105022 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
105023 }
105024
105025@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
105026
105027 static int __net_init xfrm6_net_init(struct net *net)
105028 {
105029- struct ctl_table *table;
105030+ ctl_table_no_const *table = NULL;
105031 struct ctl_table_header *hdr;
105032
105033- table = xfrm6_policy_table;
105034 if (!net_eq(net, &init_net)) {
105035- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
105036+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
105037 if (!table)
105038 goto err_alloc;
105039
105040 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
105041- }
105042+ hdr = register_net_sysctl(net, "net/ipv6", table);
105043+ } else
105044+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
105045
105046- hdr = register_net_sysctl(net, "net/ipv6", table);
105047 if (!hdr)
105048 goto err_reg;
105049
105050@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
105051 return 0;
105052
105053 err_reg:
105054- if (!net_eq(net, &init_net))
105055- kfree(table);
105056+ kfree(table);
105057 err_alloc:
105058 return -ENOMEM;
105059 }
105060diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
105061index e15c16a..7cf07aa 100644
105062--- a/net/ipx/ipx_proc.c
105063+++ b/net/ipx/ipx_proc.c
105064@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
105065 struct proc_dir_entry *p;
105066 int rc = -ENOMEM;
105067
105068- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
105069+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
105070
105071 if (!ipx_proc_dir)
105072 goto out;
105073diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
105074index 61ceb4c..e788eb8 100644
105075--- a/net/irda/ircomm/ircomm_tty.c
105076+++ b/net/irda/ircomm/ircomm_tty.c
105077@@ -317,10 +317,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105078 add_wait_queue(&port->open_wait, &wait);
105079
105080 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
105081- __FILE__, __LINE__, tty->driver->name, port->count);
105082+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105083
105084 spin_lock_irqsave(&port->lock, flags);
105085- port->count--;
105086+ atomic_dec(&port->count);
105087 port->blocked_open++;
105088 spin_unlock_irqrestore(&port->lock, flags);
105089
105090@@ -355,7 +355,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105091 }
105092
105093 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
105094- __FILE__, __LINE__, tty->driver->name, port->count);
105095+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105096
105097 schedule();
105098 }
105099@@ -365,12 +365,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105100
105101 spin_lock_irqsave(&port->lock, flags);
105102 if (!tty_hung_up_p(filp))
105103- port->count++;
105104+ atomic_inc(&port->count);
105105 port->blocked_open--;
105106 spin_unlock_irqrestore(&port->lock, flags);
105107
105108 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
105109- __FILE__, __LINE__, tty->driver->name, port->count);
105110+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105111
105112 if (!retval)
105113 port->flags |= ASYNC_NORMAL_ACTIVE;
105114@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
105115
105116 /* ++ is not atomic, so this should be protected - Jean II */
105117 spin_lock_irqsave(&self->port.lock, flags);
105118- self->port.count++;
105119+ atomic_inc(&self->port.count);
105120 spin_unlock_irqrestore(&self->port.lock, flags);
105121 tty_port_tty_set(&self->port, tty);
105122
105123 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
105124- self->line, self->port.count);
105125+ self->line, atomic_read(&self->port.count));
105126
105127 /* Not really used by us, but lets do it anyway */
105128 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
105129@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
105130 tty_kref_put(port->tty);
105131 }
105132 port->tty = NULL;
105133- port->count = 0;
105134+ atomic_set(&port->count, 0);
105135 spin_unlock_irqrestore(&port->lock, flags);
105136
105137 wake_up_interruptible(&port->open_wait);
105138@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
105139 seq_putc(m, '\n');
105140
105141 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
105142- seq_printf(m, "Open count: %d\n", self->port.count);
105143+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
105144 seq_printf(m, "Max data size: %d\n", self->max_data_size);
105145 seq_printf(m, "Max header size: %d\n", self->max_header_size);
105146
105147diff --git a/net/irda/irproc.c b/net/irda/irproc.c
105148index b9ac598..f88cc56 100644
105149--- a/net/irda/irproc.c
105150+++ b/net/irda/irproc.c
105151@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
105152 {
105153 int i;
105154
105155- proc_irda = proc_mkdir("irda", init_net.proc_net);
105156+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
105157 if (proc_irda == NULL)
105158 return;
105159
105160diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
105161index a089b6b..3ca3b60 100644
105162--- a/net/iucv/af_iucv.c
105163+++ b/net/iucv/af_iucv.c
105164@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
105165 {
105166 char name[12];
105167
105168- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
105169+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105170 while (__iucv_get_sock_by_name(name)) {
105171 sprintf(name, "%08x",
105172- atomic_inc_return(&iucv_sk_list.autobind_name));
105173+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105174 }
105175 memcpy(iucv->src_name, name, 8);
105176 }
105177diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
105178index da78793..bdd78cf 100644
105179--- a/net/iucv/iucv.c
105180+++ b/net/iucv/iucv.c
105181@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
105182 return NOTIFY_OK;
105183 }
105184
105185-static struct notifier_block __refdata iucv_cpu_notifier = {
105186+static struct notifier_block iucv_cpu_notifier = {
105187 .notifier_call = iucv_cpu_notify,
105188 };
105189
105190diff --git a/net/key/af_key.c b/net/key/af_key.c
105191index 1847ec4..26ef732 100644
105192--- a/net/key/af_key.c
105193+++ b/net/key/af_key.c
105194@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
105195 static u32 get_acqseq(void)
105196 {
105197 u32 res;
105198- static atomic_t acqseq;
105199+ static atomic_unchecked_t acqseq;
105200
105201 do {
105202- res = atomic_inc_return(&acqseq);
105203+ res = atomic_inc_return_unchecked(&acqseq);
105204 } while (!res);
105205 return res;
105206 }
105207diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
105208index edb78e6..8dc654a 100644
105209--- a/net/l2tp/l2tp_eth.c
105210+++ b/net/l2tp/l2tp_eth.c
105211@@ -42,12 +42,12 @@ struct l2tp_eth {
105212 struct sock *tunnel_sock;
105213 struct l2tp_session *session;
105214 struct list_head list;
105215- atomic_long_t tx_bytes;
105216- atomic_long_t tx_packets;
105217- atomic_long_t tx_dropped;
105218- atomic_long_t rx_bytes;
105219- atomic_long_t rx_packets;
105220- atomic_long_t rx_errors;
105221+ atomic_long_unchecked_t tx_bytes;
105222+ atomic_long_unchecked_t tx_packets;
105223+ atomic_long_unchecked_t tx_dropped;
105224+ atomic_long_unchecked_t rx_bytes;
105225+ atomic_long_unchecked_t rx_packets;
105226+ atomic_long_unchecked_t rx_errors;
105227 };
105228
105229 /* via l2tp_session_priv() */
105230@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
105231 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
105232
105233 if (likely(ret == NET_XMIT_SUCCESS)) {
105234- atomic_long_add(len, &priv->tx_bytes);
105235- atomic_long_inc(&priv->tx_packets);
105236+ atomic_long_add_unchecked(len, &priv->tx_bytes);
105237+ atomic_long_inc_unchecked(&priv->tx_packets);
105238 } else {
105239- atomic_long_inc(&priv->tx_dropped);
105240+ atomic_long_inc_unchecked(&priv->tx_dropped);
105241 }
105242 return NETDEV_TX_OK;
105243 }
105244@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
105245 {
105246 struct l2tp_eth *priv = netdev_priv(dev);
105247
105248- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
105249- stats->tx_packets = atomic_long_read(&priv->tx_packets);
105250- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
105251- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
105252- stats->rx_packets = atomic_long_read(&priv->rx_packets);
105253- stats->rx_errors = atomic_long_read(&priv->rx_errors);
105254+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
105255+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
105256+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
105257+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
105258+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
105259+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
105260 return stats;
105261 }
105262
105263@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
105264 nf_reset(skb);
105265
105266 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
105267- atomic_long_inc(&priv->rx_packets);
105268- atomic_long_add(data_len, &priv->rx_bytes);
105269+ atomic_long_inc_unchecked(&priv->rx_packets);
105270+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
105271 } else {
105272- atomic_long_inc(&priv->rx_errors);
105273+ atomic_long_inc_unchecked(&priv->rx_errors);
105274 }
105275 return;
105276
105277 error:
105278- atomic_long_inc(&priv->rx_errors);
105279+ atomic_long_inc_unchecked(&priv->rx_errors);
105280 kfree_skb(skb);
105281 }
105282
105283diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
105284index 1a3c7e0..80f8b0c 100644
105285--- a/net/llc/llc_proc.c
105286+++ b/net/llc/llc_proc.c
105287@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
105288 int rc = -ENOMEM;
105289 struct proc_dir_entry *p;
105290
105291- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
105292+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
105293 if (!llc_proc_dir)
105294 goto out;
105295
105296diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
105297index 927b4ea..88a30e2 100644
105298--- a/net/mac80211/cfg.c
105299+++ b/net/mac80211/cfg.c
105300@@ -540,7 +540,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
105301 ret = ieee80211_vif_use_channel(sdata, chandef,
105302 IEEE80211_CHANCTX_EXCLUSIVE);
105303 }
105304- } else if (local->open_count == local->monitors) {
105305+ } else if (local_read(&local->open_count) == local->monitors) {
105306 local->_oper_chandef = *chandef;
105307 ieee80211_hw_config(local, 0);
105308 }
105309@@ -3286,7 +3286,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
105310 else
105311 local->probe_req_reg--;
105312
105313- if (!local->open_count)
105314+ if (!local_read(&local->open_count))
105315 break;
105316
105317 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
105318@@ -3420,8 +3420,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
105319 if (chanctx_conf) {
105320 *chandef = chanctx_conf->def;
105321 ret = 0;
105322- } else if (local->open_count > 0 &&
105323- local->open_count == local->monitors &&
105324+ } else if (local_read(&local->open_count) > 0 &&
105325+ local_read(&local->open_count) == local->monitors &&
105326 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
105327 if (local->use_chanctx)
105328 *chandef = local->monitor_chandef;
105329diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
105330index ef7a089..fe1caf7 100644
105331--- a/net/mac80211/ieee80211_i.h
105332+++ b/net/mac80211/ieee80211_i.h
105333@@ -28,6 +28,7 @@
105334 #include <net/ieee80211_radiotap.h>
105335 #include <net/cfg80211.h>
105336 #include <net/mac80211.h>
105337+#include <asm/local.h>
105338 #include "key.h"
105339 #include "sta_info.h"
105340 #include "debug.h"
105341@@ -1055,7 +1056,7 @@ struct ieee80211_local {
105342 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
105343 spinlock_t queue_stop_reason_lock;
105344
105345- int open_count;
105346+ local_t open_count;
105347 int monitors, cooked_mntrs;
105348 /* number of interfaces with corresponding FIF_ flags */
105349 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
105350diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
105351index f75e5f1..3d9ad4f 100644
105352--- a/net/mac80211/iface.c
105353+++ b/net/mac80211/iface.c
105354@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105355 break;
105356 }
105357
105358- if (local->open_count == 0) {
105359+ if (local_read(&local->open_count) == 0) {
105360 res = drv_start(local);
105361 if (res)
105362 goto err_del_bss;
105363@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105364 res = drv_add_interface(local, sdata);
105365 if (res)
105366 goto err_stop;
105367- } else if (local->monitors == 0 && local->open_count == 0) {
105368+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
105369 res = ieee80211_add_virtual_monitor(local);
105370 if (res)
105371 goto err_stop;
105372@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105373 atomic_inc(&local->iff_promiscs);
105374
105375 if (coming_up)
105376- local->open_count++;
105377+ local_inc(&local->open_count);
105378
105379 if (hw_reconf_flags)
105380 ieee80211_hw_config(local, hw_reconf_flags);
105381@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105382 err_del_interface:
105383 drv_remove_interface(local, sdata);
105384 err_stop:
105385- if (!local->open_count)
105386+ if (!local_read(&local->open_count))
105387 drv_stop(local);
105388 err_del_bss:
105389 sdata->bss = NULL;
105390@@ -889,7 +889,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105391 }
105392
105393 if (going_down)
105394- local->open_count--;
105395+ local_dec(&local->open_count);
105396
105397 switch (sdata->vif.type) {
105398 case NL80211_IFTYPE_AP_VLAN:
105399@@ -950,7 +950,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105400 }
105401 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
105402
105403- if (local->open_count == 0)
105404+ if (local_read(&local->open_count) == 0)
105405 ieee80211_clear_tx_pending(local);
105406
105407 /*
105408@@ -990,7 +990,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105409
105410 ieee80211_recalc_ps(local, -1);
105411
105412- if (local->open_count == 0) {
105413+ if (local_read(&local->open_count) == 0) {
105414 ieee80211_stop_device(local);
105415
105416 /* no reconfiguring after stop! */
105417@@ -1001,7 +1001,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105418 ieee80211_configure_filter(local);
105419 ieee80211_hw_config(local, hw_reconf_flags);
105420
105421- if (local->monitors == local->open_count)
105422+ if (local->monitors == local_read(&local->open_count))
105423 ieee80211_add_virtual_monitor(local);
105424 }
105425
105426diff --git a/net/mac80211/main.c b/net/mac80211/main.c
105427index e0ab432..36b7b94 100644
105428--- a/net/mac80211/main.c
105429+++ b/net/mac80211/main.c
105430@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
105431 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
105432 IEEE80211_CONF_CHANGE_POWER);
105433
105434- if (changed && local->open_count) {
105435+ if (changed && local_read(&local->open_count)) {
105436 ret = drv_config(local, changed);
105437 /*
105438 * Goal:
105439diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
105440index 4c5192e..04cc0d8 100644
105441--- a/net/mac80211/pm.c
105442+++ b/net/mac80211/pm.c
105443@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105444 struct ieee80211_sub_if_data *sdata;
105445 struct sta_info *sta;
105446
105447- if (!local->open_count)
105448+ if (!local_read(&local->open_count))
105449 goto suspend;
105450
105451 ieee80211_scan_cancel(local);
105452@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105453 cancel_work_sync(&local->dynamic_ps_enable_work);
105454 del_timer_sync(&local->dynamic_ps_timer);
105455
105456- local->wowlan = wowlan && local->open_count;
105457+ local->wowlan = wowlan && local_read(&local->open_count);
105458 if (local->wowlan) {
105459 int err = drv_suspend(local, wowlan);
105460 if (err < 0) {
105461@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105462 WARN_ON(!list_empty(&local->chanctx_list));
105463
105464 /* stop hardware - this must stop RX */
105465- if (local->open_count)
105466+ if (local_read(&local->open_count))
105467 ieee80211_stop_device(local);
105468
105469 suspend:
105470diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
105471index 8fdadfd..a4f72b8 100644
105472--- a/net/mac80211/rate.c
105473+++ b/net/mac80211/rate.c
105474@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
105475
105476 ASSERT_RTNL();
105477
105478- if (local->open_count)
105479+ if (local_read(&local->open_count))
105480 return -EBUSY;
105481
105482 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
105483diff --git a/net/mac80211/util.c b/net/mac80211/util.c
105484index 725af7a..a21a20a 100644
105485--- a/net/mac80211/util.c
105486+++ b/net/mac80211/util.c
105487@@ -1643,7 +1643,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105488 }
105489 #endif
105490 /* everything else happens only if HW was up & running */
105491- if (!local->open_count)
105492+ if (!local_read(&local->open_count))
105493 goto wake_up;
105494
105495 /*
105496@@ -1869,7 +1869,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105497 local->in_reconfig = false;
105498 barrier();
105499
105500- if (local->monitors == local->open_count && local->monitors > 0)
105501+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
105502 ieee80211_add_virtual_monitor(local);
105503
105504 /*
105505diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
105506index 6d77cce..36e2fc3 100644
105507--- a/net/netfilter/Kconfig
105508+++ b/net/netfilter/Kconfig
105509@@ -1096,6 +1096,16 @@ config NETFILTER_XT_MATCH_ESP
105510
105511 To compile it as a module, choose M here. If unsure, say N.
105512
105513+config NETFILTER_XT_MATCH_GRADM
105514+ tristate '"gradm" match support'
105515+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
105516+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
105517+ ---help---
105518+ The gradm match allows to match on grsecurity RBAC being enabled.
105519+ It is useful when iptables rules are applied early on bootup to
105520+ prevent connections to the machine (except from a trusted host)
105521+ while the RBAC system is disabled.
105522+
105523 config NETFILTER_XT_MATCH_HASHLIMIT
105524 tristate '"hashlimit" match support'
105525 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
105526diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
105527index fad5fdb..ba3672a 100644
105528--- a/net/netfilter/Makefile
105529+++ b/net/netfilter/Makefile
105530@@ -136,6 +136,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
105531 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
105532 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
105533 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
105534+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
105535 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
105536 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
105537 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
105538diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
105539index ec8114f..6b2bfba 100644
105540--- a/net/netfilter/ipset/ip_set_core.c
105541+++ b/net/netfilter/ipset/ip_set_core.c
105542@@ -1921,7 +1921,7 @@ done:
105543 return ret;
105544 }
105545
105546-static struct nf_sockopt_ops so_set __read_mostly = {
105547+static struct nf_sockopt_ops so_set = {
105548 .pf = PF_INET,
105549 .get_optmin = SO_IP_SET,
105550 .get_optmax = SO_IP_SET + 1,
105551diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
105552index 610e19c..08d0c3f 100644
105553--- a/net/netfilter/ipvs/ip_vs_conn.c
105554+++ b/net/netfilter/ipvs/ip_vs_conn.c
105555@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
105556 /* Increase the refcnt counter of the dest */
105557 ip_vs_dest_hold(dest);
105558
105559- conn_flags = atomic_read(&dest->conn_flags);
105560+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
105561 if (cp->protocol != IPPROTO_UDP)
105562 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
105563 flags = cp->flags;
105564@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
105565
105566 cp->control = NULL;
105567 atomic_set(&cp->n_control, 0);
105568- atomic_set(&cp->in_pkts, 0);
105569+ atomic_set_unchecked(&cp->in_pkts, 0);
105570
105571 cp->packet_xmit = NULL;
105572 cp->app = NULL;
105573@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
105574
105575 /* Don't drop the entry if its number of incoming packets is not
105576 located in [0, 8] */
105577- i = atomic_read(&cp->in_pkts);
105578+ i = atomic_read_unchecked(&cp->in_pkts);
105579 if (i > 8 || i < 0) return 0;
105580
105581 if (!todrop_rate[i]) return 0;
105582diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
105583index 5c34e8d..0d8eb7f 100644
105584--- a/net/netfilter/ipvs/ip_vs_core.c
105585+++ b/net/netfilter/ipvs/ip_vs_core.c
105586@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
105587 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
105588 /* do not touch skb anymore */
105589
105590- atomic_inc(&cp->in_pkts);
105591+ atomic_inc_unchecked(&cp->in_pkts);
105592 ip_vs_conn_put(cp);
105593 return ret;
105594 }
105595@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
105596 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
105597 pkts = sysctl_sync_threshold(ipvs);
105598 else
105599- pkts = atomic_add_return(1, &cp->in_pkts);
105600+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105601
105602 if (ipvs->sync_state & IP_VS_STATE_MASTER)
105603 ip_vs_sync_conn(net, cp, pkts);
105604diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
105605index fd3f444..ab28fa24 100644
105606--- a/net/netfilter/ipvs/ip_vs_ctl.c
105607+++ b/net/netfilter/ipvs/ip_vs_ctl.c
105608@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
105609 */
105610 ip_vs_rs_hash(ipvs, dest);
105611 }
105612- atomic_set(&dest->conn_flags, conn_flags);
105613+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
105614
105615 /* bind the service */
105616 old_svc = rcu_dereference_protected(dest->svc, 1);
105617@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
105618 * align with netns init in ip_vs_control_net_init()
105619 */
105620
105621-static struct ctl_table vs_vars[] = {
105622+static ctl_table_no_const vs_vars[] __read_only = {
105623 {
105624 .procname = "amemthresh",
105625 .maxlen = sizeof(int),
105626@@ -1989,7 +1989,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105627 " %-7s %-6d %-10d %-10d\n",
105628 &dest->addr.in6,
105629 ntohs(dest->port),
105630- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105631+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105632 atomic_read(&dest->weight),
105633 atomic_read(&dest->activeconns),
105634 atomic_read(&dest->inactconns));
105635@@ -2000,7 +2000,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105636 "%-7s %-6d %-10d %-10d\n",
105637 ntohl(dest->addr.ip),
105638 ntohs(dest->port),
105639- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105640+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105641 atomic_read(&dest->weight),
105642 atomic_read(&dest->activeconns),
105643 atomic_read(&dest->inactconns));
105644@@ -2471,7 +2471,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
105645
105646 entry.addr = dest->addr.ip;
105647 entry.port = dest->port;
105648- entry.conn_flags = atomic_read(&dest->conn_flags);
105649+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
105650 entry.weight = atomic_read(&dest->weight);
105651 entry.u_threshold = dest->u_threshold;
105652 entry.l_threshold = dest->l_threshold;
105653@@ -3010,7 +3010,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
105654 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
105655 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
105656 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
105657- (atomic_read(&dest->conn_flags) &
105658+ (atomic_read_unchecked(&dest->conn_flags) &
105659 IP_VS_CONN_F_FWD_MASK)) ||
105660 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
105661 atomic_read(&dest->weight)) ||
105662@@ -3600,7 +3600,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
105663 {
105664 int idx;
105665 struct netns_ipvs *ipvs = net_ipvs(net);
105666- struct ctl_table *tbl;
105667+ ctl_table_no_const *tbl;
105668
105669 atomic_set(&ipvs->dropentry, 0);
105670 spin_lock_init(&ipvs->dropentry_lock);
105671diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
105672index 547ff33..c8c8117 100644
105673--- a/net/netfilter/ipvs/ip_vs_lblc.c
105674+++ b/net/netfilter/ipvs/ip_vs_lblc.c
105675@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
105676 * IPVS LBLC sysctl table
105677 */
105678 #ifdef CONFIG_SYSCTL
105679-static struct ctl_table vs_vars_table[] = {
105680+static ctl_table_no_const vs_vars_table[] __read_only = {
105681 {
105682 .procname = "lblc_expiration",
105683 .data = NULL,
105684diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105685index 3f21a2f..a112e85 100644
105686--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105687+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105688@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105689 * IPVS LBLCR sysctl table
105690 */
105691
105692-static struct ctl_table vs_vars_table[] = {
105693+static ctl_table_no_const vs_vars_table[] __read_only = {
105694 {
105695 .procname = "lblcr_expiration",
105696 .data = NULL,
105697diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105698index eadffb2..c2feeae 100644
105699--- a/net/netfilter/ipvs/ip_vs_sync.c
105700+++ b/net/netfilter/ipvs/ip_vs_sync.c
105701@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105702 cp = cp->control;
105703 if (cp) {
105704 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105705- pkts = atomic_add_return(1, &cp->in_pkts);
105706+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105707 else
105708 pkts = sysctl_sync_threshold(ipvs);
105709 ip_vs_sync_conn(net, cp->control, pkts);
105710@@ -771,7 +771,7 @@ control:
105711 if (!cp)
105712 return;
105713 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105714- pkts = atomic_add_return(1, &cp->in_pkts);
105715+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105716 else
105717 pkts = sysctl_sync_threshold(ipvs);
105718 goto sloop;
105719@@ -894,7 +894,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105720
105721 if (opt)
105722 memcpy(&cp->in_seq, opt, sizeof(*opt));
105723- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105724+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105725 cp->state = state;
105726 cp->old_state = cp->state;
105727 /*
105728diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105729index 56896a4..dfe3806 100644
105730--- a/net/netfilter/ipvs/ip_vs_xmit.c
105731+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105732@@ -1114,7 +1114,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105733 else
105734 rc = NF_ACCEPT;
105735 /* do not touch skb anymore */
105736- atomic_inc(&cp->in_pkts);
105737+ atomic_inc_unchecked(&cp->in_pkts);
105738 goto out;
105739 }
105740
105741@@ -1206,7 +1206,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105742 else
105743 rc = NF_ACCEPT;
105744 /* do not touch skb anymore */
105745- atomic_inc(&cp->in_pkts);
105746+ atomic_inc_unchecked(&cp->in_pkts);
105747 goto out;
105748 }
105749
105750diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105751index a4b5e2a..13b1de3 100644
105752--- a/net/netfilter/nf_conntrack_acct.c
105753+++ b/net/netfilter/nf_conntrack_acct.c
105754@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105755 #ifdef CONFIG_SYSCTL
105756 static int nf_conntrack_acct_init_sysctl(struct net *net)
105757 {
105758- struct ctl_table *table;
105759+ ctl_table_no_const *table;
105760
105761 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105762 GFP_KERNEL);
105763diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105764index de88c4a..ec84234 100644
105765--- a/net/netfilter/nf_conntrack_core.c
105766+++ b/net/netfilter/nf_conntrack_core.c
105767@@ -1739,6 +1739,10 @@ void nf_conntrack_init_end(void)
105768 #define DYING_NULLS_VAL ((1<<30)+1)
105769 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105770
105771+#ifdef CONFIG_GRKERNSEC_HIDESYM
105772+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105773+#endif
105774+
105775 int nf_conntrack_init_net(struct net *net)
105776 {
105777 int ret = -ENOMEM;
105778@@ -1764,7 +1768,11 @@ int nf_conntrack_init_net(struct net *net)
105779 if (!net->ct.stat)
105780 goto err_pcpu_lists;
105781
105782+#ifdef CONFIG_GRKERNSEC_HIDESYM
105783+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105784+#else
105785 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105786+#endif
105787 if (!net->ct.slabname)
105788 goto err_slabname;
105789
105790diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105791index 4e78c57..ec8fb74 100644
105792--- a/net/netfilter/nf_conntrack_ecache.c
105793+++ b/net/netfilter/nf_conntrack_ecache.c
105794@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105795 #ifdef CONFIG_SYSCTL
105796 static int nf_conntrack_event_init_sysctl(struct net *net)
105797 {
105798- struct ctl_table *table;
105799+ ctl_table_no_const *table;
105800
105801 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105802 GFP_KERNEL);
105803diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105804index 5b3eae7..dd4b8fe 100644
105805--- a/net/netfilter/nf_conntrack_helper.c
105806+++ b/net/netfilter/nf_conntrack_helper.c
105807@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105808
105809 static int nf_conntrack_helper_init_sysctl(struct net *net)
105810 {
105811- struct ctl_table *table;
105812+ ctl_table_no_const *table;
105813
105814 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105815 GFP_KERNEL);
105816diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105817index b65d586..beec902 100644
105818--- a/net/netfilter/nf_conntrack_proto.c
105819+++ b/net/netfilter/nf_conntrack_proto.c
105820@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105821
105822 static void
105823 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105824- struct ctl_table **table,
105825+ ctl_table_no_const **table,
105826 unsigned int users)
105827 {
105828 if (users > 0)
105829diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105830index f641751..d3c5b51 100644
105831--- a/net/netfilter/nf_conntrack_standalone.c
105832+++ b/net/netfilter/nf_conntrack_standalone.c
105833@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105834
105835 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105836 {
105837- struct ctl_table *table;
105838+ ctl_table_no_const *table;
105839
105840 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105841 GFP_KERNEL);
105842diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105843index 7a394df..bd91a8a 100644
105844--- a/net/netfilter/nf_conntrack_timestamp.c
105845+++ b/net/netfilter/nf_conntrack_timestamp.c
105846@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105847 #ifdef CONFIG_SYSCTL
105848 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105849 {
105850- struct ctl_table *table;
105851+ ctl_table_no_const *table;
105852
105853 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105854 GFP_KERNEL);
105855diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105856index daad602..384be13 100644
105857--- a/net/netfilter/nf_log.c
105858+++ b/net/netfilter/nf_log.c
105859@@ -353,7 +353,7 @@ static const struct file_operations nflog_file_ops = {
105860
105861 #ifdef CONFIG_SYSCTL
105862 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105863-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105864+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105865
105866 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105867 void __user *buffer, size_t *lenp, loff_t *ppos)
105868@@ -384,14 +384,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105869 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105870 mutex_unlock(&nf_log_mutex);
105871 } else {
105872+ ctl_table_no_const nf_log_table = *table;
105873+
105874 mutex_lock(&nf_log_mutex);
105875 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
105876 lockdep_is_held(&nf_log_mutex));
105877 if (!logger)
105878- table->data = "NONE";
105879+ nf_log_table.data = "NONE";
105880 else
105881- table->data = logger->name;
105882- r = proc_dostring(table, write, buffer, lenp, ppos);
105883+ nf_log_table.data = logger->name;
105884+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105885 mutex_unlock(&nf_log_mutex);
105886 }
105887
105888diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105889index c68c1e5..8b5d670 100644
105890--- a/net/netfilter/nf_sockopt.c
105891+++ b/net/netfilter/nf_sockopt.c
105892@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105893 }
105894 }
105895
105896- list_add(&reg->list, &nf_sockopts);
105897+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105898 out:
105899 mutex_unlock(&nf_sockopt_mutex);
105900 return ret;
105901@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105902 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105903 {
105904 mutex_lock(&nf_sockopt_mutex);
105905- list_del(&reg->list);
105906+ pax_list_del((struct list_head *)&reg->list);
105907 mutex_unlock(&nf_sockopt_mutex);
105908 }
105909 EXPORT_SYMBOL(nf_unregister_sockopt);
105910diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105911index a11c5ff..aa413a7 100644
105912--- a/net/netfilter/nfnetlink_log.c
105913+++ b/net/netfilter/nfnetlink_log.c
105914@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
105915 struct nfnl_log_net {
105916 spinlock_t instances_lock;
105917 struct hlist_head instance_table[INSTANCE_BUCKETS];
105918- atomic_t global_seq;
105919+ atomic_unchecked_t global_seq;
105920 };
105921
105922 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105923@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
105924 /* global sequence number */
105925 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105926 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105927- htonl(atomic_inc_return(&log->global_seq))))
105928+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105929 goto nla_put_failure;
105930
105931 if (data_len) {
105932diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
105933index 108120f..5b169db 100644
105934--- a/net/netfilter/nfnetlink_queue_core.c
105935+++ b/net/netfilter/nfnetlink_queue_core.c
105936@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
105937 * returned by nf_queue. For instance, callers rely on -ECANCELED to
105938 * mean 'ignore this hook'.
105939 */
105940- if (IS_ERR(segs))
105941+ if (IS_ERR_OR_NULL(segs))
105942 goto out_err;
105943 queued = 0;
105944 err = 0;
105945diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105946index 1840989..6895744 100644
105947--- a/net/netfilter/nft_compat.c
105948+++ b/net/netfilter/nft_compat.c
105949@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
105950 /* We want to reuse existing compat_to_user */
105951 old_fs = get_fs();
105952 set_fs(KERNEL_DS);
105953- t->compat_to_user(out, in);
105954+ t->compat_to_user((void __force_user *)out, in);
105955 set_fs(old_fs);
105956 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
105957 kfree(out);
105958@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
105959 /* We want to reuse existing compat_to_user */
105960 old_fs = get_fs();
105961 set_fs(KERNEL_DS);
105962- m->compat_to_user(out, in);
105963+ m->compat_to_user((void __force_user *)out, in);
105964 set_fs(old_fs);
105965 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
105966 kfree(out);
105967diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105968new file mode 100644
105969index 0000000..c566332
105970--- /dev/null
105971+++ b/net/netfilter/xt_gradm.c
105972@@ -0,0 +1,51 @@
105973+/*
105974+ * gradm match for netfilter
105975